]> git.proxmox.com Git - mirror_ovs.git/blob - lib/nx-match.c
netdev-offload-tc: Use single 'once' variable for probing tc features
[mirror_ovs.git] / lib / nx-match.c
1 /*
2 * Copyright (c) 2010-2017, 2020 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "nx-match.h"
20
21 #include <netinet/icmp6.h>
22
23 #include "classifier.h"
24 #include "colors.h"
25 #include "openvswitch/hmap.h"
26 #include "openflow/nicira-ext.h"
27 #include "openvswitch/dynamic-string.h"
28 #include "openvswitch/meta-flow.h"
29 #include "openvswitch/ofp-actions.h"
30 #include "openvswitch/ofp-errors.h"
31 #include "openvswitch/ofp-match.h"
32 #include "openvswitch/ofp-port.h"
33 #include "openvswitch/ofpbuf.h"
34 #include "openvswitch/vlog.h"
35 #include "packets.h"
36 #include "openvswitch/shash.h"
37 #include "tun-metadata.h"
38 #include "unaligned.h"
39 #include "util.h"
40 #include "vl-mff-map.h"
41
42 VLOG_DEFINE_THIS_MODULE(nx_match);
43
44 /* OXM headers.
45 *
46 *
47 * Standard OXM/NXM
48 * ================
49 *
50 * The header is 32 bits long. It looks like this:
51 *
52 * |31 16 15 9| 8 7 0
53 * +----------------------------------+---------------+--+------------------+
54 * | oxm_class | oxm_field |hm| oxm_length |
55 * +----------------------------------+---------------+--+------------------+
56 *
57 * where hm stands for oxm_hasmask. It is followed by oxm_length bytes of
58 * payload. When oxm_hasmask is 0, the payload is the value of the field
59 * identified by the header; when oxm_hasmask is 1, the payload is a value for
60 * the field followed by a mask of equal length.
61 *
62 * Internally, we represent a standard OXM header as a 64-bit integer with the
63 * above information in the most-significant bits.
64 *
65 *
66 * Experimenter OXM
67 * ================
68 *
69 * The header is 64 bits long. It looks like the diagram above except that a
70 * 32-bit experimenter ID, which we call oxm_vendor and which identifies a
71 * vendor, is inserted just before the payload. Experimenter OXMs are
72 * identified by an all-1-bits oxm_class (OFPXMC12_EXPERIMENTER). The
73 * oxm_length value *includes* the experimenter ID, so that the real payload is
74 * only oxm_length - 4 bytes long.
75 *
76 * Internally, we represent an experimenter OXM header as a 64-bit integer with
77 * the standard header in the upper 32 bits and the experimenter ID in the
78 * lower 32 bits. (It would be more convenient to swap the positions of the
79 * two 32-bit words, but this would be more error-prone because experimenter
80 * OXMs are very rarely used, so accidentally passing one through a 32-bit type
81 * somewhere in the OVS code would be hard to find.)
82 */
83
84 /*
85 * OXM Class IDs.
86 * The high order bit differentiates reserved classes from member classes.
87 * Classes 0x0000 to 0x7FFF are member classes, allocated by ONF.
88 * Classes 0x8000 to 0xFFFE are reserved classes, reserved for standardisation.
89 */
enum ofp12_oxm_class {
    OFPXMC12_NXM_0 = 0x0000,          /* Backward compatibility with NXM. */
    OFPXMC12_NXM_1 = 0x0001,          /* Backward compatibility with NXM. */
    OFPXMC12_OPENFLOW_BASIC = 0x8000, /* Basic class for OpenFlow. */
    OFPXMC15_PACKET_REGS = 0x8001,    /* Packet registers (pipeline fields). */
    OFPXMC12_EXPERIMENTER = 0xffff,   /* Experimenter class. */
};
97
/* Functions for extracting raw field values from OXM/NXM headers.
 *
 * Each helper extracts one bit-field from the internal 64-bit header
 * representation described above: oxm_class in bits 63:48, oxm_field in
 * bits 47:41, oxm_hasmask in bit 40, oxm_length in bits 39:32, and (for
 * experimenter OXMs) the experimenter ID in the low 32 bits. */
static uint32_t nxm_vendor(uint64_t header) { return header; }
static int nxm_class(uint64_t header) { return header >> 48; }
static int nxm_field(uint64_t header) { return (header >> 41) & 0x7f; }
static bool nxm_hasmask(uint64_t header) { return (header >> 40) & 1; }
static int nxm_length(uint64_t header) { return (header >> 32) & 0xff; }

/* Returns 'header' with the low 7 bits of oxm_length cleared.  (NOTE(review):
 * the mask 0xffffff80ffffffff leaves bit 39, the top length bit, intact, so
 * this assumes lengths used with it are < 128 — confirm against callers.) */
static uint64_t nxm_no_len(uint64_t header) { return header & 0xffffff80ffffffffULL; }
105
106 static bool
107 is_experimenter_oxm(uint64_t header)
108 {
109 return nxm_class(header) == OFPXMC12_EXPERIMENTER;
110 }
111
112 /* The OXM header "length" field is somewhat tricky:
113 *
114 * - For a standard OXM header, the length is the number of bytes of the
115 * payload, and the payload consists of just the value (and mask, if
116 * present).
117 *
118 * - For an experimenter OXM header, the length is the number of bytes in
119 * the payload plus 4 (the length of the experimenter ID). That is, the
120 * experimenter ID is included in oxm_length.
121 *
122 * This function returns the length of the experimenter ID field in 'header'.
123 * That is, for an experimenter OXM (when an experimenter ID is present), it
124 * returns 4, and for a standard OXM (when no experimenter ID is present), it
125 * returns 0. */
static int
nxm_experimenter_len(uint64_t header)
{
    if (is_experimenter_oxm(header)) {
        return 4;               /* Size of the experimenter ID. */
    }
    return 0;
}
131
132 /* Returns the number of bytes that follow the header for an NXM/OXM entry
133 * with the given 'header'. */
static int
nxm_payload_len(uint64_t header)
{
    /* oxm_length includes the experimenter ID (if any); subtract it to get
     * the number of bytes of actual value-plus-mask payload. */
    int total_len = nxm_length(header);

    return total_len - nxm_experimenter_len(header);
}
139
140 /* Returns the number of bytes in the header for an NXM/OXM entry with the
141 * given 'header'. */
static int
nxm_header_len(uint64_t header)
{
    /* 4 bytes of standard header, plus 4 more for an experimenter ID. */
    int exp_len = nxm_experimenter_len(header);

    return 4 + exp_len;
}
147
/* Composes an internal 64-bit OXM/NXM header from its parts.  For standard
 * (non-experimenter) headers, pass 0 for VENDOR. */
#define NXM_HEADER(VENDOR, CLASS, FIELD, HASMASK, LENGTH)       \
    (((uint64_t) (CLASS) << 48) |                               \
     ((uint64_t) (FIELD) << 41) |                               \
     ((uint64_t) (HASMASK) << 40) |                             \
     ((uint64_t) (LENGTH) << 32) |                              \
     (VENDOR))

/* Format string and matching argument list for logging a header as
 * "vendor:class:field:hasmask:length". */
#define NXM_HEADER_FMT "%#"PRIx32":%d:%d:%d:%d"
#define NXM_HEADER_ARGS(HEADER)                                 \
    nxm_vendor(HEADER), nxm_class(HEADER), nxm_field(HEADER),   \
    nxm_hasmask(HEADER), nxm_length(HEADER)
159
160 /* Functions for turning the "hasmask" bit on or off. (This also requires
161 * adjusting the length.) */
static uint64_t
nxm_make_exact_header(uint64_t header)
{
    /* Dropping the mask halves the payload; the experimenter ID (if any)
     * stays part of oxm_length. */
    int exp_len = nxm_experimenter_len(header);
    int value_len = nxm_payload_len(header) / 2;

    return NXM_HEADER(nxm_vendor(header), nxm_class(header),
                      nxm_field(header), 0, value_len + exp_len);
}
static uint64_t
nxm_make_wild_header(uint64_t header)
{
    /* Adding a mask doubles the payload; the experimenter ID (if any)
     * stays part of oxm_length. */
    int exp_len = nxm_experimenter_len(header);
    int masked_len = nxm_payload_len(header) * 2;

    return NXM_HEADER(nxm_vendor(header), nxm_class(header),
                      nxm_field(header), 1, masked_len + exp_len);
}
176
/* Flow cookie.
 *
 * This may be used to gain the OpenFlow 1.1-like ability to restrict
 * certain NXM-based Flow Mod and Flow Stats Request messages to flows
 * with specific cookies.  See the "nx_flow_mod" and "nx_flow_stats_request"
 * structure definitions for more details.  This match is otherwise not
 * allowed: it has no mf_field, so the parser treats it as a pseudo-header
 * (see is_cookie_pseudoheader()). */
#define NXM_NX_COOKIE NXM_HEADER (0, 0x0001, 30, 0, 8)
#define NXM_NX_COOKIE_W nxm_make_wild_header(NXM_NX_COOKIE)
186
/* One entry in the NXM/OXM field table: associates an on-the-wire header
 * with the corresponding meta-flow field. */
struct nxm_field {
    uint64_t header;           /* Internal 64-bit header representation. */
    enum ofp_version version;  /* OpenFlow version associated with this
                                * header (used by nxm_field_by_mf_id()). */
    const char *name;          /* e.g. "NXM_OF_IN_PORT". */

    enum mf_field_id id;       /* Corresponding meta-flow field id. */
};
194
/* Lookup functions for the NXM/OXM field table; defined later in this
 * file. */
static const struct nxm_field *nxm_field_by_header(uint64_t header,
                                                   bool is_action,
                                                   enum ofperr *h_error);
static const struct nxm_field *nxm_field_by_name(const char *name, size_t len);
static const struct nxm_field *nxm_field_by_mf_id(enum mf_field_id,
                                                  enum ofp_version);

/* Serialization helpers; defined later in this file. */
static void nx_put_header__(struct ofpbuf *, uint64_t header, bool masked);
static void nx_put_header_len(struct ofpbuf *, enum mf_field_id field,
                              enum ofp_version version, bool masked,
                              size_t n_bytes);

/* Rate limit for nx_match parse errors.  These always indicate a bug in the
 * peer and so there's not much point in showing a lot of them. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Subfield name parser; defined later in this file. */
static const struct nxm_field *
mf_parse_subfield_name(const char *name, int name_len, bool *wild);
211
212 /* Returns the preferred OXM header to use for field 'id' in OpenFlow version
213 * 'version'. Specify 0 for 'version' if an NXM legacy header should be
214 * preferred over any standardized OXM header. Returns 0 if field 'id' cannot
215 * be expressed in NXM or OXM. */
216 static uint64_t
217 mf_oxm_header(enum mf_field_id id, enum ofp_version version)
218 {
219 const struct nxm_field *f = nxm_field_by_mf_id(id, version);
220 return f ? f->header : 0;
221 }
222
223 /* Returns the 32-bit OXM or NXM header to use for field 'id', preferring an
224 * NXM legacy header over any standardized OXM header. Returns 0 if field 'id'
225 * cannot be expressed with a 32-bit NXM or OXM header.
226 *
227 * Whenever possible, use nx_pull_header() instead of this function, because
228 * this function cannot support 64-bit experimenter OXM headers. */
229 uint32_t
230 mf_nxm_header(enum mf_field_id id)
231 {
232 uint64_t oxm = mf_oxm_header(id, 0);
233 return is_experimenter_oxm(oxm) ? 0 : oxm >> 32;
234 }
235
236 /* Returns the 32-bit OXM or NXM header to use for field 'mff'. If 'mff' is
237 * a mapped variable length mf_field, update the header with the configured
238 * length of 'mff'. Returns 0 if 'mff' cannot be expressed with a 32-bit NXM
239 * or OXM header.*/
240 uint32_t
241 nxm_header_from_mff(const struct mf_field *mff)
242 {
243 uint64_t oxm = mf_oxm_header(mff->id, 0);
244
245 if (mff->mapped) {
246 oxm = nxm_no_len(oxm) | ((uint64_t) mff->n_bytes << 32);
247 }
248
249 return is_experimenter_oxm(oxm) ? 0 : oxm >> 32;
250 }
251
252 static const struct mf_field *
253 mf_from_oxm_header(uint64_t header, const struct vl_mff_map *vl_mff_map,
254 bool is_action, enum ofperr *h_error)
255 {
256 const struct nxm_field *f = nxm_field_by_header(header, is_action, h_error);
257
258 if (f) {
259 const struct mf_field *mff = mf_from_id(f->id);
260 const struct mf_field *vl_mff = mf_get_vl_mff(mff, vl_mff_map);
261 return vl_mff ? vl_mff : mff;
262 } else {
263 return NULL;
264 }
265 }
266
267 /* Returns the "struct mf_field" that corresponds to NXM or OXM header
268 * 'header', or NULL if 'header' doesn't correspond to any known field. */
269 const struct mf_field *
270 mf_from_nxm_header(uint32_t header, const struct vl_mff_map *vl_mff_map)
271 {
272 return mf_from_oxm_header((uint64_t) header << 32, vl_mff_map, false, NULL);
273 }
274
275 /* Returns the width of the data for a field with the given 'header', in
276 * bytes. */
static int
nxm_field_bytes(uint64_t header)
{
    unsigned int payload = nxm_payload_len(header);

    /* A masked entry's payload is value followed by mask, each half the
     * payload length. */
    if (nxm_hasmask(header)) {
        return payload / 2;
    }
    return payload;
}
283 \f
284 /* nx_pull_match() and helpers. */
285
286 /* Given NXM/OXM value 'value' and mask 'mask' associated with 'header', checks
287 * for any 1-bit in the value where there is a 0-bit in the mask. Returns 0 if
288 * none, otherwise an error code. */
289 static bool
290 is_mask_consistent(uint64_t header, const uint8_t *value, const uint8_t *mask)
291 {
292 unsigned int width = nxm_field_bytes(header);
293 unsigned int i;
294
295 for (i = 0; i < width; i++) {
296 if (value[i] & ~mask[i]) {
297 if (!VLOG_DROP_WARN(&rl)) {
298 VLOG_WARN_RL(&rl, "Rejecting NXM/OXM entry "NXM_HEADER_FMT " "
299 "with 1-bits in value for bits wildcarded by the "
300 "mask.", NXM_HEADER_ARGS(header));
301 }
302 return false;
303 }
304 }
305 return true;
306 }
307
308 static bool
309 is_cookie_pseudoheader(uint64_t header)
310 {
311 return header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W;
312 }
313
/* Pulls an OXM/NXM header from the beginning of 'b' into '*header', using
 * the internal 64-bit representation (standard header in the upper 32 bits,
 * experimenter ID, if any, in the lower 32 bits).
 *
 * If 'field' is nonnull, also resolves the header to a "struct mf_field" and
 * stores it in '*field'.  When 'allow_cookie' is true, the NXM_NX_COOKIE*
 * pseudo-headers are accepted without error even though they resolve to a
 * NULL '*field'.  'vl_mff_map', if nonnull, supplies length-validated
 * variable-length fields.  'is_action' selects whether lookup failures are
 * reported with action (OFPBAC) or match (OFPBMC) error codes.
 *
 * Returns 0 on success, otherwise an OpenFlow error; on the length-error
 * paths, '*header' is zeroed and '*field' (if provided) is set to NULL. */
static enum ofperr
nx_pull_header__(struct ofpbuf *b, bool allow_cookie,
                 const struct vl_mff_map *vl_mff_map, uint64_t *header,
                 const struct mf_field **field, bool is_action)
{
    if (b->size < 4) {
        goto bad_len;
    }

    /* Read the 32-bit standard header into the upper half first; only then
     * can we tell whether a 32-bit experimenter ID follows. */
    *header = ((uint64_t) ntohl(get_unaligned_be32(b->data))) << 32;
    if (is_experimenter_oxm(*header)) {
        if (b->size < 8) {
            goto bad_len;
        }
        /* Re-read all 64 bits: standard header plus experimenter ID. */
        *header = ntohll(get_unaligned_be64(b->data));
    }
    if (nxm_length(*header) < nxm_experimenter_len(*header)) {
        /* oxm_length must at least cover the experimenter ID. */
        VLOG_WARN_RL(&rl, "OXM header "NXM_HEADER_FMT" has invalid length %d "
                     "(minimum is %d)",
                     NXM_HEADER_ARGS(*header), nxm_length(*header),
                     nxm_header_len(*header));
        goto error;
    }
    ofpbuf_pull(b, nxm_header_len(*header));

    if (field) {
        enum ofperr h_error = 0;
        *field = mf_from_oxm_header(*header, vl_mff_map, is_action, &h_error);
        if (!*field && !(allow_cookie && is_cookie_pseudoheader(*header))) {
            VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" is unknown",
                        NXM_HEADER_ARGS(*header));
            if (is_action) {
                /* Prefer the more specific error from the lookup, if any. */
                if (h_error) {
                    *field = NULL;
                    return h_error;
                }
                return OFPERR_OFPBAC_BAD_SET_TYPE;
            } else {
                return OFPERR_OFPBMC_BAD_FIELD;
            }
        } else if (mf_vl_mff_invalid(*field, vl_mff_map)) {
            return OFPERR_NXFMFC_INVALID_TLV_FIELD;
        }
    }

    return 0;

bad_len:
    VLOG_DBG_RL(&rl, "encountered partial (%"PRIu32"-byte) OXM entry",
                b->size);
error:
    *header = 0;
    if (field) {
        *field = NULL;
    }
    return OFPERR_OFPBMC_BAD_LEN;
}
371
/* Copies the 'width'-byte payload of an OXM/NXM entry into 'value'.
 *
 * The copy is truncated to the field's n_bytes (or to sizeof *value when
 * 'field' is NULL, e.g. for the cookie pseudo-header).  For variable-length
 * fields, 'value' is first zeroed and the payload is right-aligned within
 * the field's n_bytes, so a short payload acts as if zero-extended on the
 * left. */
static void
copy_entry_value(const struct mf_field *field, union mf_value *value,
                 const uint8_t *payload, int width)
{
    int copy_len;
    void *copy_dst;

    copy_dst = value;
    copy_len = MIN(width, field ? field->n_bytes : sizeof *value);

    if (field && field->variable_len) {
        memset(value, 0, field->n_bytes);
        /* Right-align: leave leading zero bytes, copy into the tail. */
        copy_dst = &value->u8 + field->n_bytes - copy_len;
    }

    memcpy(copy_dst, payload, copy_len);
}
389
/* Pulls one complete OXM/NXM entry (header, value, and optional mask) from
 * the beginning of 'b'.
 *
 * On success, stores the internal header representation in '*header', the
 * value in '*value', and the mask in '*mask' (all-ones when the entry is
 * unmasked).  If 'mask' is NULL, entries that carry a mask are rejected
 * with OFPERR_OFPBMC_BAD_MASK.  If 'field_' is nonnull, the resolved field
 * (possibly NULL for a cookie pseudo-header) is stored in '*field_'.
 *
 * An OFPERR_OFPBMC_BAD_FIELD result from the header lookup is deferred
 * rather than returned immediately: the payload is still pulled off 'b' so
 * the caller can skip unknown entries, and the error is reported through
 * the return value only when 'field_' is nonnull. */
static enum ofperr
nx_pull_entry__(struct ofpbuf *b, bool allow_cookie,
                const struct vl_mff_map *vl_mff_map, uint64_t *header,
                const struct mf_field **field_,
                union mf_value *value, union mf_value *mask, bool is_action)
{
    const struct mf_field *field;
    enum ofperr header_error;
    unsigned int payload_len;
    const uint8_t *payload;
    int width;

    header_error = nx_pull_header__(b, allow_cookie, vl_mff_map, header,
                                    &field, is_action);
    if (header_error && header_error != OFPERR_OFPBMC_BAD_FIELD) {
        return header_error;
    }

    payload_len = nxm_payload_len(*header);
    payload = ofpbuf_try_pull(b, payload_len);
    if (!payload) {
        VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" calls for %u-byte "
                    "payload but only %"PRIu32" bytes follow OXM header",
                    NXM_HEADER_ARGS(*header), payload_len, b->size);
        return OFPERR_OFPBMC_BAD_LEN;
    }

    /* For a masked entry the payload is value then mask, 'width' bytes
     * each; a value bit set where the mask bit is clear is rejected. */
    width = nxm_field_bytes(*header);
    if (nxm_hasmask(*header)
        && !is_mask_consistent(*header, payload, payload + width)) {
        return OFPERR_OFPBMC_BAD_WILDCARDS;
    }

    copy_entry_value(field, value, payload, width);

    if (mask) {
        if (nxm_hasmask(*header)) {
            copy_entry_value(field, mask, payload + width, width);
        } else {
            /* Unmasked entry: treat as an exact match on every bit. */
            memset(mask, 0xff, sizeof *mask);
        }
    } else if (nxm_hasmask(*header)) {
        VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" includes mask but "
                    "masked OXMs are not allowed here",
                    NXM_HEADER_ARGS(*header));
        return OFPERR_OFPBMC_BAD_MASK;
    }

    if (field_) {
        *field_ = field;
        /* Surface any deferred BAD_FIELD error now that the entry has been
         * consumed from 'b'. */
        return header_error;
    }

    return 0;
}
445
446 /* Attempts to pull an NXM or OXM header, value, and mask (if present) from the
447 * beginning of 'b'. If successful, stores a pointer to the "struct mf_field"
448 * corresponding to the pulled header in '*field', the value into '*value',
449 * and the mask into '*mask', and returns 0. On error, returns an OpenFlow
450 * error; in this case, some bytes might have been pulled off 'b' anyhow, and
451 * the output parameters might have been modified.
452 *
453 * If a NULL 'mask' is supplied, masked OXM or NXM entries are treated as
454 * errors (with OFPERR_OFPBMC_BAD_MASK).
455 *
456 * The "bool is_action" is supplied to differentiate between match and action
457 * headers. This is done in order to return appropriate error type and code for
458 * bad match or bad action conditions. If set to True, indicates that the
459 * OXM or NXM entries belong to an action header.
460 */
461 enum ofperr
462 nx_pull_entry(struct ofpbuf *b, const struct vl_mff_map *vl_mff_map,
463 const struct mf_field **field, union mf_value *value,
464 union mf_value *mask, bool is_action)
465 {
466 uint64_t header;
467
468 return nx_pull_entry__(b, false, vl_mff_map, &header, field, value, mask, is_action);
469 }
470
471 /* Attempts to pull an NXM or OXM header from the beginning of 'b'. If
472 * successful, stores a pointer to the "struct mf_field" corresponding to the
473 * pulled header in '*field', stores the header's hasmask bit in '*masked'
474 * (true if hasmask=1, false if hasmask=0), and returns 0. On error, returns
475 * an OpenFlow error; in this case, some bytes might have been pulled off 'b'
476 * anyhow, and the output parameters might have been modified.
477 *
478 * If NULL 'masked' is supplied, masked OXM or NXM headers are treated as
479 * errors (with OFPERR_OFPBMC_BAD_MASK).
480 */
481 enum ofperr
482 nx_pull_header(struct ofpbuf *b, const struct vl_mff_map *vl_mff_map,
483 const struct mf_field **field, bool *masked)
484 {
485 enum ofperr error;
486 uint64_t header;
487
488 error = nx_pull_header__(b, false, vl_mff_map, &header, field, false);
489 if (masked) {
490 *masked = !error && nxm_hasmask(header);
491 } else if (!error && nxm_hasmask(header)) {
492 error = OFPERR_OFPBMC_BAD_MASK;
493 }
494 return error;
495 }
496
/* Pulls one OXM/NXM match entry from 'b' (see nx_pull_entry__() for the
 * meaning of 'allow_cookie' and the output parameters) and additionally
 * validates the entry's mask and value against the field's own
 * constraints. */
static enum ofperr
nx_pull_match_entry(struct ofpbuf *b, bool allow_cookie,
                    const struct vl_mff_map *vl_mff_map,
                    const struct mf_field **field,
                    union mf_value *value, union mf_value *mask)
{
    enum ofperr error;
    uint64_t header;

    error = nx_pull_entry__(b, allow_cookie, vl_mff_map, &header, field, value,
                            mask, false);
    if (error) {
        return error;
    }
    /* '*field' is NULL for a cookie pseudo-header; nothing to validate. */
    if (field && *field) {
        if (!mf_is_mask_valid(*field, mask)) {
            VLOG_DBG_RL(&rl, "bad mask for field %s", (*field)->name);
            return OFPERR_OFPBMC_BAD_MASK;
        }
        if (!mf_is_value_valid(*field, value)) {
            VLOG_DBG_RL(&rl, "bad value for field %s", (*field)->name);
            return OFPERR_OFPBMC_BAD_VALUE;
        }
    }
    return 0;
}
523
/* Parses the 'match_len' bytes of OXM/NXM entries at 'p' into 'match'.
 *
 * Prerequisites will only be checked when 'strict' is 'true'.  This allows
 * decoding conntrack original direction 5-tuple IP addresses without the
 * ethertype being present, when decoding metadata only.
 *
 * If 'cookie' and 'cookie_mask' are nonnull (they must be both null or both
 * nonnull), NXM_NX_COOKIE* pseudo-entries are accepted and stored through
 * them; otherwise such entries are rejected.  In non-strict mode, unknown
 * fields are silently skipped.  'tun_table' is installed in the match while
 * parsing tunnel metadata and cleared again before returning success. */
static enum ofperr
nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
            bool pipeline_fields_only, struct match *match, ovs_be64 *cookie,
            ovs_be64 *cookie_mask, const struct tun_table *tun_table,
            const struct vl_mff_map *vl_mff_map)
{
    ovs_assert((cookie != NULL) == (cookie_mask != NULL));

    match_init_catchall(match);
    match->flow.tunnel.metadata.tab = tun_table;
    if (cookie) {
        *cookie = *cookie_mask = htonll(0);
    }

    struct ofpbuf b = ofpbuf_const_initializer(p, match_len);
    while (b.size) {
        const uint8_t *pos = b.data;    /* For error-offset reporting. */
        const struct mf_field *field;
        union mf_value value;
        union mf_value mask;
        enum ofperr error;

        error = nx_pull_match_entry(&b, cookie != NULL, vl_mff_map, &field,
                                    &value, &mask);
        if (error) {
            /* Loose parsing skips unknown fields; any other error falls
             * through to the reporting block below. */
            if (error == OFPERR_OFPBMC_BAD_FIELD && !strict) {
                continue;
            }
        } else if (!field) {
            /* Cookie pseudo-entry: at most one allowed, and only when the
             * caller asked for it. */
            if (!cookie) {
                error = OFPERR_OFPBMC_BAD_FIELD;
            } else if (*cookie_mask) {
                error = OFPERR_OFPBMC_DUP_FIELD;
            } else {
                *cookie = value.be64;
                *cookie_mask = mask.be64;
            }
        } else if (strict && !mf_are_match_prereqs_ok(field, match)) {
            error = OFPERR_OFPBMC_BAD_PREREQ;
        } else if (!mf_is_all_wild(field, &match->wc)) {
            /* Field already set by an earlier entry. */
            error = OFPERR_OFPBMC_DUP_FIELD;
        } else if (pipeline_fields_only && !mf_is_pipeline_field(field)) {
            error = OFPERR_OFPBRC_PIPELINE_FIELDS_ONLY;
        } else {
            char *err_str;

            mf_set(field, &value, &mask, match, &err_str);
            if (err_str) {
                VLOG_DBG_RL(&rl, "error parsing OXM at offset %"PRIdPTR" "
                            "within match (%s)", pos - p, err_str);
                free(err_str);
                return OFPERR_OFPBMC_BAD_VALUE;
            }

            match_add_ethernet_prereq(match, field);
        }

        if (error) {
            VLOG_DBG_RL(&rl, "error parsing OXM at offset %"PRIdPTR" "
                        "within match (%s)", pos -
                        p, ofperr_to_string(error));
            return error;
        }
    }

    match->flow.tunnel.metadata.tab = NULL;
    return 0;
}
595
/* Common helper for nx_pull_match() and nx_pull_match_loose(): pulls the
 * nx_match from 'b' (nx_match is padded to a multiple of 8 bytes, so
 * ROUND_UP(match_len, 8) bytes are consumed) and parses the first
 * 'match_len' of those bytes with nx_pull_raw().  See nx_pull_raw() for the
 * remaining parameters. */
static enum ofperr
nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
                bool pipeline_fields_only, struct match *match,
                ovs_be64 *cookie, ovs_be64 *cookie_mask,
                const struct tun_table *tun_table,
                const struct vl_mff_map *vl_mff_map)
{
    uint8_t *p = NULL;

    if (match_len) {
        p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
        if (!p) {
            VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
                        "multiple of 8, is longer than space in message (max "
                        "length %"PRIu32")", match_len, b->size);
            return OFPERR_OFPBMC_BAD_LEN;
        }
    }

    return nx_pull_raw(p, match_len, strict, pipeline_fields_only, match,
                       cookie, cookie_mask, tun_table, vl_mff_map);
}
618
619 /* Parses the nx_match formatted match description in 'b' with length
620 * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
621 * are valid pointers, then stores the cookie and mask in them if 'b' contains
622 * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
623 * If 'pipeline_fields_only' is true, this function returns
624 * OFPERR_OFPBRC_PIPELINE_FIELDS_ONLY if there is any non pipeline fields
625 * in 'b'.
626 *
627 * 'vl_mff_map" is an optional parameter that is used to validate the length
628 * of variable length mf_fields in 'match'. If it is not provided, the
629 * default mf_fields with maximum length will be used.
630 *
631 * Fails with an error upon encountering an unknown NXM header.
632 *
633 * Returns 0 if successful, otherwise an OpenFlow error code. */
634 enum ofperr
635 nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match,
636 ovs_be64 *cookie, ovs_be64 *cookie_mask,
637 bool pipeline_fields_only, const struct tun_table *tun_table,
638 const struct vl_mff_map *vl_mff_map)
639 {
640 return nx_pull_match__(b, match_len, true, pipeline_fields_only, match,
641 cookie, cookie_mask, tun_table, vl_mff_map);
642 }
643
644 /* Behaves the same as nx_pull_match(), but skips over unknown NXM headers,
645 * instead of failing with an error, and does not check for field
646 * prerequisites. */
647 enum ofperr
648 nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
649 struct match *match, ovs_be64 *cookie,
650 ovs_be64 *cookie_mask, bool pipeline_fields_only,
651 const struct tun_table *tun_table)
652 {
653 return nx_pull_match__(b, match_len, false, pipeline_fields_only, match,
654 cookie, cookie_mask, tun_table, NULL);
655 }
656
/* Common helper for oxm_pull_match() and oxm_pull_match_loose(): validates
 * and pulls the "struct ofp11_match_header" at the start of 'b', then parses
 * the OXM entries that follow it with nx_pull_raw().  The whole match,
 * including padding to a multiple of 8 bytes, is consumed from 'b'. */
static enum ofperr
oxm_pull_match__(struct ofpbuf *b, bool strict, bool pipeline_fields_only,
                 const struct tun_table *tun_table,
                 const struct vl_mff_map *vl_mff_map, struct match *match)
{
    struct ofp11_match_header *omh = b->data;
    uint8_t *p;
    uint16_t match_len;

    if (b->size < sizeof *omh) {
        return OFPERR_OFPBMC_BAD_LEN;
    }

    /* 'length' covers the header itself, so it can be no smaller. */
    match_len = ntohs(omh->length);
    if (match_len < sizeof *omh) {
        return OFPERR_OFPBMC_BAD_LEN;
    }

    if (omh->type != htons(OFPMT_OXM)) {
        return OFPERR_OFPBMC_BAD_TYPE;
    }

    p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
    if (!p) {
        VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
                    "multiple of 8, is longer than space in message (max "
                    "length %"PRIu32")", match_len, b->size);
        return OFPERR_OFPBMC_BAD_LEN;
    }

    return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
                       strict, pipeline_fields_only, match, NULL, NULL,
                       tun_table, vl_mff_map);
}
691
692 /* Parses the oxm formatted match description preceded by a struct
693 * ofp11_match_header in 'b'. Stores the result in 'match'.
694 * If 'pipeline_fields_only' is true, this function returns
695 * OFPERR_OFPBRC_PIPELINE_FIELDS_ONLY if there is any non pipeline fields
696 * in 'b'.
697 *
698 * 'vl_mff_map' is an optional parameter that is used to validate the length
699 * of variable length mf_fields in 'match'. If it is not provided, the
700 * default mf_fields with maximum length will be used.
701 *
702 * Fails with an error when encountering unknown OXM headers.
703 *
704 * Returns 0 if successful, otherwise an OpenFlow error code. */
705 enum ofperr
706 oxm_pull_match(struct ofpbuf *b, bool pipeline_fields_only,
707 const struct tun_table *tun_table,
708 const struct vl_mff_map *vl_mff_map, struct match *match)
709 {
710 return oxm_pull_match__(b, true, pipeline_fields_only, tun_table,
711 vl_mff_map, match);
712 }
713
714 /* Behaves the same as oxm_pull_match() with two exceptions. Skips over
715 * unknown OXM headers instead of failing with an error when they are
716 * encountered, and does not check for field prerequisites. */
717 enum ofperr
718 oxm_pull_match_loose(struct ofpbuf *b, bool pipeline_fields_only,
719 const struct tun_table *tun_table, struct match *match)
720 {
721 return oxm_pull_match__(b, false, pipeline_fields_only, tun_table, NULL,
722 match);
723 }
724
725 /* Parses the OXM match description in the 'oxm_len' bytes in 'oxm'. Stores
726 * the result in 'match'.
727 *
728 * Returns 0 if successful, otherwise an OpenFlow error code.
729 *
730 * If 'loose' is true, encountering unknown OXM headers or missing field
731 * prerequisites are not considered as error conditions.
732 */
733 enum ofperr
734 oxm_decode_match(const void *oxm, size_t oxm_len, bool loose,
735 const struct tun_table *tun_table,
736 const struct vl_mff_map *vl_mff_map, struct match *match)
737 {
738 return nx_pull_raw(oxm, oxm_len, !loose, false, match, NULL, NULL,
739 tun_table, vl_mff_map);
740 }
741
/* Verify an array of OXM TLVs treating value of each TLV as a mask,
 * disallowing masks in each TLV and ignoring pre-requisites.
 *
 * On success the parsed values are accumulated in 'fa' and 0 is returned.
 * On any error, 'fa->values' is freed and NULLed before the OpenFlow error
 * is returned. */
enum ofperr
oxm_pull_field_array(const void *fields_data, size_t fields_len,
                     struct field_array *fa)
{
    struct ofpbuf b = ofpbuf_const_initializer(fields_data, fields_len);
    while (b.size) {
        const uint8_t *pos = b.data;    /* For error-offset reporting. */
        const struct mf_field *field;
        union mf_value value;
        enum ofperr error;
        uint64_t header;

        /* NULL 'mask' argument: masked TLVs are rejected. */
        error = nx_pull_entry__(&b, false, NULL, &header, &field, &value,
                                NULL, false);
        if (error) {
            VLOG_DBG_RL(&rl, "error pulling field array field");
        } else if (!field) {
            VLOG_DBG_RL(&rl, "unknown field array field");
            error = OFPERR_OFPBMC_BAD_FIELD;
        } else if (bitmap_is_set(fa->used.bm, field->id)) {
            VLOG_DBG_RL(&rl, "duplicate field array field '%s'", field->name);
            error = OFPERR_OFPBMC_DUP_FIELD;
        } else if (!mf_is_mask_valid(field, &value)) {
            /* The TLV's value is interpreted as a mask for the field. */
            VLOG_DBG_RL(&rl, "bad mask in field array field '%s'", field->name);
            error = OFPERR_OFPBMC_BAD_MASK;
        } else {
            field_array_set(field->id, &value, fa);
        }

        if (error) {
            const uint8_t *start = fields_data;

            VLOG_DBG_RL(&rl, "error parsing OXM at offset %"PRIdPTR" "
                        "within field array (%s)", pos - start,
                        ofperr_to_string(error));

            /* Release any values accumulated before the failure. */
            free(fa->values);
            fa->values = NULL;
            return error;
        }
    }

    return 0;
}
788 \f
789 /* nx_put_match() and helpers.
790 *
791 * 'put' functions whose names end in 'w' add a wildcarded field.
792 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
793 * Other 'put' functions add exact-match fields.
794 */
795
/* State carried through a sequence of nxm_put*() calls. */
struct nxm_put_ctx {
    struct ofpbuf *output;      /* Buffer that entries are appended to. */
    bool implied_ethernet;      /* Set once any emitted field has
                                 * prerequisites (see nxm_put__()). */
};
800
801 void
802 nxm_put_entry_raw(struct ofpbuf *b,
803 enum mf_field_id field, enum ofp_version version,
804 const void *value, const void *mask, size_t n_bytes)
805 {
806 nx_put_header_len(b, field, version, !!mask, n_bytes);
807 ofpbuf_put(b, value, n_bytes);
808 if (mask) {
809 ofpbuf_put(b, mask, n_bytes);
810 }
811 }
812
813 static void
814 nxm_put__(struct nxm_put_ctx *ctx,
815 enum mf_field_id field, enum ofp_version version,
816 const void *value, const void *mask, size_t n_bytes)
817 {
818 nxm_put_entry_raw(ctx->output, field, version, value, mask, n_bytes);
819 if (!ctx->implied_ethernet && mf_from_id(field)->prereqs != MFP_NONE) {
820 ctx->implied_ethernet = true;
821 }
822 }
823
824 static void
825 nxm_put(struct nxm_put_ctx *ctx,
826 enum mf_field_id field, enum ofp_version version,
827 const void *value, const void *mask, size_t n_bytes)
828 {
829 if (!is_all_zeros(mask, n_bytes)) {
830 bool masked = !is_all_ones(mask, n_bytes);
831 nxm_put__(ctx, field, version, value, masked ? mask : NULL, n_bytes);
832 }
833 }
834
/* Fixed-width wrappers around nxm_put() and nxm_put__().
 *
 * The "*_Nm" variants take a value and a mask and emit nothing, an exact
 * entry, or a masked entry depending on the mask (see nxm_put()).  The
 * plain "*_N" variants always emit an exact-match entry. */
static void
nxm_put_8m(struct nxm_put_ctx *ctx,
           enum mf_field_id field, enum ofp_version version,
           uint8_t value, uint8_t mask)
{
    nxm_put(ctx, field, version, &value, &mask, sizeof value);
}

static void
nxm_put_8(struct nxm_put_ctx *ctx,
          enum mf_field_id field, enum ofp_version version, uint8_t value)
{
    nxm_put__(ctx, field, version, &value, NULL, sizeof value);
}

static void
nxm_put_16m(struct nxm_put_ctx *ctx,
            enum mf_field_id field, enum ofp_version version,
            ovs_be16 value, ovs_be16 mask)
{
    nxm_put(ctx, field, version, &value, &mask, sizeof value);
}

static void
nxm_put_16(struct nxm_put_ctx *ctx,
           enum mf_field_id field, enum ofp_version version, ovs_be16 value)
{
    nxm_put__(ctx, field, version, &value, NULL, sizeof value);
}

static void
nxm_put_32m(struct nxm_put_ctx *ctx,
            enum mf_field_id field, enum ofp_version version,
            ovs_be32 value, ovs_be32 mask)
{
    nxm_put(ctx, field, version, &value, &mask, sizeof value);
}

static void
nxm_put_32(struct nxm_put_ctx *ctx,
           enum mf_field_id field, enum ofp_version version, ovs_be32 value)
{
    nxm_put__(ctx, field, version, &value, NULL, sizeof value);
}

static void
nxm_put_64m(struct nxm_put_ctx *ctx,
            enum mf_field_id field, enum ofp_version version,
            ovs_be64 value, ovs_be64 mask)
{
    nxm_put(ctx, field, version, &value, &mask, sizeof value);
}

static void
nxm_put_128m(struct nxm_put_ctx *ctx,
             enum mf_field_id field, enum ofp_version version,
             const ovs_be128 value, const ovs_be128 mask)
{
    nxm_put(ctx, field, version, &value, &mask, sizeof(value));
}

static void
nxm_put_eth_masked(struct nxm_put_ctx *ctx,
                   enum mf_field_id field, enum ofp_version version,
                   const struct eth_addr value, const struct eth_addr mask)
{
    nxm_put(ctx, field, version, value.ea, mask.ea, ETH_ADDR_LEN);
}

static void
nxm_put_ipv6(struct nxm_put_ctx *ctx,
             enum mf_field_id field, enum ofp_version version,
             const struct in6_addr *value, const struct in6_addr *mask)
{
    nxm_put(ctx, field, version, value->s6_addr, mask->s6_addr,
            sizeof value->s6_addr);
}
912
/* Appends an OXM/NXM entry for the IP fragment bits of 'match', if matched.
 * A mask that covers all of FLOW_NW_FRAG_MASK is widened to UINT8_MAX so
 * the entry is emitted as a fully exact match on the field. */
static void
nxm_put_frag(struct nxm_put_ctx *ctx, const struct match *match,
             enum ofp_version version)
{
    uint8_t nw_frag = match->flow.nw_frag & FLOW_NW_FRAG_MASK;
    uint8_t nw_frag_mask = match->wc.masks.nw_frag & FLOW_NW_FRAG_MASK;

    nxm_put_8m(ctx, MFF_IP_FRAG, version, nw_frag,
               nw_frag_mask == FLOW_NW_FRAG_MASK ? UINT8_MAX : nw_frag_mask);
}
923
/* Appends to 'ctx' a set of OXM or NXM matches for the IPv4 or IPv6 fields in
 * 'match', plus the L4 fields (TCP/UDP/SCTP/ICMP/ND) when the IP protocol is
 * matched.  'oxm' is 0 for NXM, otherwise the OpenFlow version for OXM. */
static void
nxm_put_ip(struct nxm_put_ctx *ctx,
           const struct match *match, enum ofp_version oxm)
{
    const struct flow *flow = &match->flow;
    ovs_be16 dl_type = get_dl_type(flow);

    /* L3 addresses: IPv4 or IPv6, chosen by the Ethertype. */
    if (dl_type == htons(ETH_TYPE_IP)) {
        nxm_put_32m(ctx, MFF_IPV4_SRC, oxm,
                    flow->nw_src, match->wc.masks.nw_src);
        nxm_put_32m(ctx, MFF_IPV4_DST, oxm,
                    flow->nw_dst, match->wc.masks.nw_dst);
    } else {
        nxm_put_ipv6(ctx, MFF_IPV6_SRC, oxm,
                     &flow->ipv6_src, &match->wc.masks.ipv6_src);
        nxm_put_ipv6(ctx, MFF_IPV6_DST, oxm,
                     &flow->ipv6_dst, &match->wc.masks.ipv6_dst);
    }

    nxm_put_frag(ctx, match, oxm);

    /* OXM carries DSCP shifted right by two bits; NXM keeps it in place in
     * the ToS byte. */
    if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
        if (oxm) {
            nxm_put_8(ctx, MFF_IP_DSCP_SHIFTED, oxm,
                      flow->nw_tos >> 2);
        } else {
            nxm_put_8(ctx, MFF_IP_DSCP, oxm,
                      flow->nw_tos & IP_DSCP_MASK);
        }
    }

    if (match->wc.masks.nw_tos & IP_ECN_MASK) {
        nxm_put_8(ctx, MFF_IP_ECN, oxm,
                  flow->nw_tos & IP_ECN_MASK);
    }

    if (match->wc.masks.nw_ttl) {
        nxm_put_8(ctx, MFF_IP_TTL, oxm, flow->nw_ttl);
    }

    nxm_put_32m(ctx, MFF_IPV6_LABEL, oxm,
                flow->ipv6_label, match->wc.masks.ipv6_label);

    /* L4 fields make sense only when the IP protocol itself is matched. */
    if (match->wc.masks.nw_proto) {
        nxm_put_8(ctx, MFF_IP_PROTO, oxm, flow->nw_proto);

        if (flow->nw_proto == IPPROTO_TCP) {
            nxm_put_16m(ctx, MFF_TCP_SRC, oxm,
                        flow->tp_src, match->wc.masks.tp_src);
            nxm_put_16m(ctx, MFF_TCP_DST, oxm,
                        flow->tp_dst, match->wc.masks.tp_dst);
            nxm_put_16m(ctx, MFF_TCP_FLAGS, oxm,
                        flow->tcp_flags, match->wc.masks.tcp_flags);
        } else if (flow->nw_proto == IPPROTO_UDP) {
            nxm_put_16m(ctx, MFF_UDP_SRC, oxm,
                        flow->tp_src, match->wc.masks.tp_src);
            nxm_put_16m(ctx, MFF_UDP_DST, oxm,
                        flow->tp_dst, match->wc.masks.tp_dst);
        } else if (flow->nw_proto == IPPROTO_SCTP) {
            nxm_put_16m(ctx, MFF_SCTP_SRC, oxm, flow->tp_src,
                        match->wc.masks.tp_src);
            nxm_put_16m(ctx, MFF_SCTP_DST, oxm, flow->tp_dst,
                        match->wc.masks.tp_dst);
        } else if (is_icmpv4(flow, NULL)) {
            /* ICMP type and code travel in tp_src/tp_dst (network order). */
            if (match->wc.masks.tp_src) {
                nxm_put_8(ctx, MFF_ICMPV4_TYPE, oxm,
                          ntohs(flow->tp_src));
            }
            if (match->wc.masks.tp_dst) {
                nxm_put_8(ctx, MFF_ICMPV4_CODE, oxm,
                          ntohs(flow->tp_dst));
            }
        } else if (is_icmpv6(flow, NULL)) {
            if (match->wc.masks.tp_src) {
                nxm_put_8(ctx, MFF_ICMPV6_TYPE, oxm,
                          ntohs(flow->tp_src));
            }
            if (match->wc.masks.tp_dst) {
                nxm_put_8(ctx, MFF_ICMPV6_CODE, oxm,
                          ntohs(flow->tp_dst));
            }
            if (is_nd(flow, NULL)) {
                /* NOTE(review): for ND flows the reserved word appears to
                 * share flow storage with igmp_group_ip4 and the option type
                 * with tcp_flags -- confirm against lib/flow definitions. */
                if (match->wc.masks.igmp_group_ip4) {
                    nxm_put_32(ctx, MFF_ND_RESERVED, oxm,
                               flow->igmp_group_ip4);
                }
                nxm_put_ipv6(ctx, MFF_ND_TARGET, oxm,
                             &flow->nd_target, &match->wc.masks.nd_target);
                if (match->wc.masks.tcp_flags) {
                    nxm_put_8(ctx, MFF_ND_OPTIONS_TYPE, oxm,
                              ntohs(flow->tcp_flags));
                }
                /* Source link-layer address is only valid in a neighbor
                 * solicitation, target link-layer in an advertisement. */
                if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
                    nxm_put_eth_masked(ctx, MFF_ND_SLL, oxm,
                                       flow->arp_sha, match->wc.masks.arp_sha);
                }
                if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
                    nxm_put_eth_masked(ctx, MFF_ND_TLL, oxm,
                                       flow->arp_tha, match->wc.masks.arp_tha);
                }
            }
        }
    }
}
1030
/* Appends to 'b' the nx_match format that expresses 'match'.  For Flow Mod and
 * Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
 * Otherwise, 'cookie_mask' should be zero.
 *
 * Specify 'oxm' as 0 to express the match in NXM format; otherwise, specify
 * 'oxm' as the OpenFlow version number for the OXM format to use.
 *
 * This function can cause 'b''s data to be reallocated.
 *
 * Returns the number of bytes appended to 'b', excluding padding.
 *
 * If 'match' is a catch-all rule that matches every packet, then this function
 * appends nothing to 'b' and returns 0. */
static int
nx_put_raw(struct ofpbuf *b, enum ofp_version oxm, const struct match *match,
           ovs_be64 cookie, ovs_be64 cookie_mask)
{
    const struct flow *flow = &match->flow;
    const size_t start_len = b->size;
    ovs_be16 dl_type = get_dl_type(flow);
    ovs_be32 spi_mask;
    int match_len;

    /* Reminder to revisit this function whenever a new field is added to
     * struct flow (which bumps FLOW_WC_SEQ). */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);

    struct nxm_put_ctx ctx = { .output = b, .implied_ethernet = false };

    /* OpenFlow Packet Type. Must be first. */
    if (match->wc.masks.packet_type && !match_has_default_packet_type(match)) {
        nxm_put_32m(&ctx, MFF_PACKET_TYPE, oxm, flow->packet_type,
                    match->wc.masks.packet_type);
    }

    /* Metadata. */
    if (match->wc.masks.dp_hash) {
        nxm_put_32m(&ctx, MFF_DP_HASH, oxm,
                    htonl(flow->dp_hash), htonl(match->wc.masks.dp_hash));
    }

    if (match->wc.masks.recirc_id) {
        nxm_put_32(&ctx, MFF_RECIRC_ID, oxm, htonl(flow->recirc_id));
    }

    if (match->wc.masks.conj_id) {
        nxm_put_32(&ctx, MFF_CONJ_ID, oxm, htonl(flow->conj_id));
    }

    /* Input port: 32-bit OF1.1+ port number in OXM, 16-bit in NXM. */
    if (match->wc.masks.in_port.ofp_port) {
        ofp_port_t in_port = flow->in_port.ofp_port;
        if (oxm) {
            nxm_put_32(&ctx, MFF_IN_PORT_OXM, oxm,
                       ofputil_port_to_ofp11(in_port));
        } else {
            nxm_put_16(&ctx, MFF_IN_PORT, oxm,
                       htons(ofp_to_u16(in_port)));
        }
    }
    if (match->wc.masks.actset_output) {
        nxm_put_32(&ctx, MFF_ACTSET_OUTPUT, oxm,
                   ofputil_port_to_ofp11(flow->actset_output));
    }

    /* Ethernet. */
    nxm_put_eth_masked(&ctx, MFF_ETH_SRC, oxm,
                       flow->dl_src, match->wc.masks.dl_src);
    nxm_put_eth_masked(&ctx, MFF_ETH_DST, oxm,
                       flow->dl_dst, match->wc.masks.dl_dst);
    nxm_put_16m(&ctx, MFF_ETH_TYPE, oxm,
                ofputil_dl_type_to_openflow(flow->dl_type),
                match->wc.masks.dl_type);

    /* 802.1Q.  OXM splits the TCI into separate VID and PCP fields; NXM
     * carries the whole TCI in one masked field. */
    if (oxm) {
        ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI);
        ovs_be16 vid = flow->vlans[0].tci & VID_CFI_MASK;
        ovs_be16 mask = match->wc.masks.vlans[0].tci & VID_CFI_MASK;

        if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
            nxm_put_16(&ctx, MFF_VLAN_VID, oxm, vid);
        } else if (mask) {
            nxm_put_16m(&ctx, MFF_VLAN_VID, oxm, vid, mask);
        }

        /* PCP is only meaningful when a VLAN tag is present. */
        if (vid && vlan_tci_to_pcp(match->wc.masks.vlans[0].tci)) {
            nxm_put_8(&ctx, MFF_VLAN_PCP, oxm,
                      vlan_tci_to_pcp(flow->vlans[0].tci));
        }

    } else {
        nxm_put_16m(&ctx, MFF_VLAN_TCI, oxm, flow->vlans[0].tci,
                    match->wc.masks.vlans[0].tci);
    }

    /* MPLS. */
    if (eth_type_mpls(dl_type)) {
        if (match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK)) {
            nxm_put_8(&ctx, MFF_MPLS_TC, oxm,
                      mpls_lse_to_tc(flow->mpls_lse[0]));
        }

        if (match->wc.masks.mpls_lse[0] & htonl(MPLS_BOS_MASK)) {
            nxm_put_8(&ctx, MFF_MPLS_BOS, oxm,
                      mpls_lse_to_bos(flow->mpls_lse[0]));
        }

        if (match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK)) {
            nxm_put_32(&ctx, MFF_MPLS_LABEL, oxm,
                       htonl(mpls_lse_to_label(flow->mpls_lse[0])));
        }
    }

    /* L3. */
    if (is_ip_any(flow)) {
        nxm_put_ip(&ctx, match, oxm);
    } else if (dl_type == htons(ETH_TYPE_ARP) ||
               dl_type == htons(ETH_TYPE_RARP)) {
        /* ARP. */
        if (match->wc.masks.nw_proto) {
            nxm_put_16(&ctx, MFF_ARP_OP, oxm,
                       htons(flow->nw_proto));
        }
        nxm_put_32m(&ctx, MFF_ARP_SPA, oxm,
                    flow->nw_src, match->wc.masks.nw_src);
        nxm_put_32m(&ctx, MFF_ARP_TPA, oxm,
                    flow->nw_dst, match->wc.masks.nw_dst);
        nxm_put_eth_masked(&ctx, MFF_ARP_SHA, oxm,
                           flow->arp_sha, match->wc.masks.arp_sha);
        nxm_put_eth_masked(&ctx, MFF_ARP_THA, oxm,
                           flow->arp_tha, match->wc.masks.arp_tha);
    }

    /* Tunnel ID. */
    nxm_put_64m(&ctx, MFF_TUN_ID, oxm,
                flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);

    /* Other tunnel metadata. */
    nxm_put_16m(&ctx, MFF_TUN_FLAGS, oxm,
                htons(flow->tunnel.flags), htons(match->wc.masks.tunnel.flags));
    nxm_put_32m(&ctx, MFF_TUN_SRC, oxm,
                flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src);
    nxm_put_32m(&ctx, MFF_TUN_DST, oxm,
                flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst);
    nxm_put_ipv6(&ctx, MFF_TUN_IPV6_SRC, oxm,
                 &flow->tunnel.ipv6_src, &match->wc.masks.tunnel.ipv6_src);
    nxm_put_ipv6(&ctx, MFF_TUN_IPV6_DST, oxm,
                 &flow->tunnel.ipv6_dst, &match->wc.masks.tunnel.ipv6_dst);
    nxm_put_16m(&ctx, MFF_TUN_GBP_ID, oxm,
                flow->tunnel.gbp_id, match->wc.masks.tunnel.gbp_id);
    nxm_put_8m(&ctx, MFF_TUN_GBP_FLAGS, oxm,
               flow->tunnel.gbp_flags, match->wc.masks.tunnel.gbp_flags);
    tun_metadata_to_nx_match(b, oxm, match);

    /* ERSPAN */
    nxm_put_32m(&ctx, MFF_TUN_ERSPAN_IDX, oxm,
                htonl(flow->tunnel.erspan_idx),
                htonl(match->wc.masks.tunnel.erspan_idx));
    nxm_put_8m(&ctx, MFF_TUN_ERSPAN_VER, oxm,
               flow->tunnel.erspan_ver, match->wc.masks.tunnel.erspan_ver);
    nxm_put_8m(&ctx, MFF_TUN_ERSPAN_DIR, oxm,
               flow->tunnel.erspan_dir, match->wc.masks.tunnel.erspan_dir);
    nxm_put_8m(&ctx, MFF_TUN_ERSPAN_HWID, oxm,
               flow->tunnel.erspan_hwid, match->wc.masks.tunnel.erspan_hwid);

    /* GTP-U */
    nxm_put_8m(&ctx, MFF_TUN_GTPU_FLAGS, oxm, flow->tunnel.gtpu_flags,
               match->wc.masks.tunnel.gtpu_flags);
    nxm_put_8m(&ctx, MFF_TUN_GTPU_MSGTYPE, oxm, flow->tunnel.gtpu_msgtype,
               match->wc.masks.tunnel.gtpu_msgtype);

    /* Network Service Header */
    nxm_put_8m(&ctx, MFF_NSH_FLAGS, oxm, flow->nsh.flags,
               match->wc.masks.nsh.flags);
    nxm_put_8m(&ctx, MFF_NSH_TTL, oxm, flow->nsh.ttl,
               match->wc.masks.nsh.ttl);
    nxm_put_8m(&ctx, MFF_NSH_MDTYPE, oxm, flow->nsh.mdtype,
               match->wc.masks.nsh.mdtype);
    nxm_put_8m(&ctx, MFF_NSH_NP, oxm, flow->nsh.np,
               match->wc.masks.nsh.np);
    /* A full SPI mask widens to all-ones so the field encodes as exact. */
    spi_mask = nsh_path_hdr_to_spi(match->wc.masks.nsh.path_hdr);
    if (spi_mask == htonl(NSH_SPI_MASK >> NSH_SPI_SHIFT)) {
        spi_mask = OVS_BE32_MAX;
    }
    nxm_put_32m(&ctx, MFF_NSH_SPI, oxm,
                nsh_path_hdr_to_spi(flow->nsh.path_hdr),
                spi_mask);
    nxm_put_8m(&ctx, MFF_NSH_SI, oxm,
               nsh_path_hdr_to_si(flow->nsh.path_hdr),
               nsh_path_hdr_to_si(match->wc.masks.nsh.path_hdr));
    for (int i = 0; i < 4; i++) {
        nxm_put_32m(&ctx, MFF_NSH_C1 + i, oxm, flow->nsh.context[i],
                    match->wc.masks.nsh.context[i]);
    }

    /* Registers.  32-bit registers before OF1.5 (and NXM, where oxm == 0),
     * paired 64-bit extended registers from OF1.5 on. */
    if (oxm < OFP15_VERSION) {
        for (int i = 0; i < FLOW_N_REGS; i++) {
            nxm_put_32m(&ctx, MFF_REG0 + i, oxm,
                        htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
        }
    } else {
        for (int i = 0; i < FLOW_N_XREGS; i++) {
            nxm_put_64m(&ctx, MFF_XREG0 + i, oxm,
                        htonll(flow_get_xreg(flow, i)),
                        htonll(flow_get_xreg(&match->wc.masks, i)));
        }
    }

    /* Packet mark. */
    nxm_put_32m(&ctx, MFF_PKT_MARK, oxm, htonl(flow->pkt_mark),
                htonl(match->wc.masks.pkt_mark));

    /* Connection tracking. */
    nxm_put_32m(&ctx, MFF_CT_STATE, oxm, htonl(flow->ct_state),
                htonl(match->wc.masks.ct_state));
    nxm_put_16m(&ctx, MFF_CT_ZONE, oxm, htons(flow->ct_zone),
                htons(match->wc.masks.ct_zone));
    nxm_put_32m(&ctx, MFF_CT_MARK, oxm, htonl(flow->ct_mark),
                htonl(match->wc.masks.ct_mark));
    nxm_put_128m(&ctx, MFF_CT_LABEL, oxm, hton128(flow->ct_label),
                 hton128(match->wc.masks.ct_label));
    nxm_put_32m(&ctx, MFF_CT_NW_SRC, oxm,
                flow->ct_nw_src, match->wc.masks.ct_nw_src);
    nxm_put_ipv6(&ctx, MFF_CT_IPV6_SRC, oxm,
                 &flow->ct_ipv6_src, &match->wc.masks.ct_ipv6_src);
    nxm_put_32m(&ctx, MFF_CT_NW_DST, oxm,
                flow->ct_nw_dst, match->wc.masks.ct_nw_dst);
    nxm_put_ipv6(&ctx, MFF_CT_IPV6_DST, oxm,
                 &flow->ct_ipv6_dst, &match->wc.masks.ct_ipv6_dst);
    if (flow->ct_nw_proto) {
        nxm_put_8m(&ctx, MFF_CT_NW_PROTO, oxm, flow->ct_nw_proto,
                   match->wc.masks.ct_nw_proto);
        nxm_put_16m(&ctx, MFF_CT_TP_SRC, oxm,
                    flow->ct_tp_src, match->wc.masks.ct_tp_src);
        nxm_put_16m(&ctx, MFF_CT_TP_DST, oxm,
                    flow->ct_tp_dst, match->wc.masks.ct_tp_dst);
    }
    /* OpenFlow 1.1+ Metadata. */
    nxm_put_64m(&ctx, MFF_METADATA, oxm,
                flow->metadata, match->wc.masks.metadata);

    /* Cookie.  Emitted without a mask when the mask is all-ones. */
    if (cookie_mask) {
        bool masked = cookie_mask != OVS_BE64_MAX;

        cookie &= cookie_mask;
        nx_put_header__(b, NXM_NX_COOKIE, masked);
        ofpbuf_put(b, &cookie, sizeof cookie);
        if (masked) {
            ofpbuf_put(b, &cookie_mask, sizeof cookie_mask);
        }
    }

    /* If nothing above implied an Ethernet packet type, prepend an explicit
     * packet-type TLV at the start of the match (it must come first). */
    if (match_has_default_packet_type(match) && !ctx.implied_ethernet) {
        uint64_t pt_stub[16 / 8];
        struct ofpbuf pt;
        ofpbuf_use_stack(&pt, pt_stub, sizeof pt_stub);
        nxm_put_entry_raw(&pt, MFF_PACKET_TYPE, oxm, &flow->packet_type,
                          NULL, sizeof flow->packet_type);

        ofpbuf_insert(b, start_len, pt.data, pt.size);
    }

    match_len = b->size - start_len;
    return match_len;
}
1296
1297 /* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
1298 * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
1299 * Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
1300 * Otherwise, 'cookie_mask' should be zero.
1301 *
1302 * This function can cause 'b''s data to be reallocated.
1303 *
1304 * Returns the number of bytes appended to 'b', excluding padding. The return
1305 * value can be zero if it appended nothing at all to 'b' (which happens if
1306 * 'cr' is a catch-all rule that matches every packet). */
1307 int
1308 nx_put_match(struct ofpbuf *b, const struct match *match,
1309 ovs_be64 cookie, ovs_be64 cookie_mask)
1310 {
1311 int match_len = nx_put_raw(b, 0, match, cookie, cookie_mask);
1312
1313 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1314 return match_len;
1315 }
1316
1317 /* Appends to 'b' an struct ofp11_match_header followed by the OXM format that
1318 * expresses 'match', plus enough zero bytes to pad the data appended out to a
1319 * multiple of 8.
1320 *
1321 * OXM differs slightly among versions of OpenFlow. Specify the OpenFlow
1322 * version in use as 'version'.
1323 *
1324 * This function can cause 'b''s data to be reallocated.
1325 *
1326 * Returns the number of bytes appended to 'b', excluding the padding. Never
1327 * returns zero. */
1328 int
1329 oxm_put_match(struct ofpbuf *b, const struct match *match,
1330 enum ofp_version version)
1331 {
1332 int match_len;
1333 struct ofp11_match_header *omh;
1334 size_t start_len = b->size;
1335 ovs_be64 cookie = htonll(0), cookie_mask = htonll(0);
1336
1337 ofpbuf_put_uninit(b, sizeof *omh);
1338 match_len = (nx_put_raw(b, version, match, cookie, cookie_mask)
1339 + sizeof *omh);
1340 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1341
1342 omh = ofpbuf_at(b, start_len, sizeof *omh);
1343 omh->type = htons(OFPMT_OXM);
1344 omh->length = htons(match_len);
1345
1346 return match_len;
1347 }
1348
1349 /* Appends to 'b' the OXM formats that expresses 'match', without header or
1350 * padding.
1351 *
1352 * OXM differs slightly among versions of OpenFlow. Specify the OpenFlow
1353 * version in use as 'version'.
1354 *
1355 * This function can cause 'b''s data to be reallocated. */
1356 void
1357 oxm_put_raw(struct ofpbuf *b, const struct match *match,
1358 enum ofp_version version)
1359 {
1360 nx_put_raw(b, version, match, 0, 0);
1361 }
1362
1363 /* Appends to 'b' the nx_match format that expresses the tlv corresponding
1364 * to 'id'. If mask is not all-ones then it is also formated as the value
1365 * of the tlv. */
1366 static void
1367 nx_format_mask_tlv(struct ds *ds, enum mf_field_id id,
1368 const union mf_value *mask)
1369 {
1370 const struct mf_field *mf = mf_from_id(id);
1371
1372 ds_put_format(ds, "%s", mf->name);
1373
1374 if (!is_all_ones(mask, mf->n_bytes)) {
1375 ds_put_char(ds, '=');
1376 mf_format(mf, mask, NULL, NULL, ds);
1377 }
1378
1379 ds_put_char(ds, ',');
1380 }
1381
1382 /* Appends a string representation of 'fa_' to 'ds'.
1383 * The TLVS value of 'fa_' is treated as a mask and
1384 * only the name of fields is formated if it is all ones. */
1385 void
1386 oxm_format_field_array(struct ds *ds, const struct field_array *fa)
1387 {
1388 size_t start_len = ds->length;
1389 size_t i, offset = 0;
1390
1391 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fa->used.bm) {
1392 const struct mf_field *mf = mf_from_id(i);
1393 union mf_value value;
1394
1395 memcpy(&value, fa->values + offset, mf->n_bytes);
1396 nx_format_mask_tlv(ds, i, &value);
1397 offset += mf->n_bytes;
1398 }
1399
1400 if (ds->length > start_len) {
1401 ds_chomp(ds, ',');
1402 }
1403 }
1404
1405 /* Appends to 'b' a series of OXM TLVs corresponding to the series
1406 * of enum mf_field_id and value tuples in 'fa_'.
1407 *
1408 * OXM differs slightly among versions of OpenFlow. Specify the OpenFlow
1409 * version in use as 'version'.
1410 *
1411 * This function can cause 'b''s data to be reallocated.
1412 *
1413 * Returns the number of bytes appended to 'b'. May return zero. */
1414 int
1415 oxm_put_field_array(struct ofpbuf *b, const struct field_array *fa,
1416 enum ofp_version version)
1417 {
1418 size_t start_len = b->size;
1419
1420 /* XXX Some care might need to be taken of different TLVs that handle the
1421 * same flow fields. In particular:
1422
1423 * - VLAN_TCI, VLAN_VID and MFF_VLAN_PCP
1424 * - IP_DSCP_MASK and DSCP_SHIFTED
1425 * - REGS and XREGS
1426 */
1427
1428 size_t i, offset = 0;
1429
1430 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fa->used.bm) {
1431 const struct mf_field *mf = mf_from_id(i);
1432 union mf_value value;
1433
1434 memcpy(&value, fa->values + offset, mf->n_bytes);
1435
1436 int len = mf_field_len(mf, &value, NULL, NULL);
1437 nxm_put_entry_raw(b, i, version,
1438 &value + mf->n_bytes - len, NULL, len);
1439 offset += mf->n_bytes;
1440 }
1441
1442 return b->size - start_len;
1443 }
1444
1445 static void
1446 nx_put_header__(struct ofpbuf *b, uint64_t header, bool masked)
1447 {
1448 uint64_t masked_header = masked ? nxm_make_wild_header(header) : header;
1449 ovs_be64 network_header = htonll(masked_header);
1450
1451 ofpbuf_put(b, &network_header, nxm_header_len(header));
1452 }
1453
1454 void
1455 nx_put_header(struct ofpbuf *b, enum mf_field_id field,
1456 enum ofp_version version, bool masked)
1457 {
1458 nx_put_header__(b, mf_oxm_header(field, version), masked);
1459 }
1460
1461 void nx_put_mff_header(struct ofpbuf *b, const struct mf_field *mff,
1462 enum ofp_version version, bool masked)
1463 {
1464 if (mff->mapped) {
1465 nx_put_header_len(b, mff->id, version, masked, mff->n_bytes);
1466 } else {
1467 nx_put_header(b, mff->id, version, masked);
1468 }
1469 }
1470
1471 static void
1472 nx_put_header_len(struct ofpbuf *b, enum mf_field_id field,
1473 enum ofp_version version, bool masked, size_t n_bytes)
1474 {
1475 uint64_t header = mf_oxm_header(field, version);
1476
1477 header = NXM_HEADER(nxm_vendor(header), nxm_class(header),
1478 nxm_field(header), false,
1479 nxm_experimenter_len(header) + n_bytes);
1480
1481 nx_put_header__(b, header, masked);
1482 }
1483
1484 void
1485 nx_put_entry(struct ofpbuf *b, const struct mf_field *mff,
1486 enum ofp_version version, const union mf_value *value,
1487 const union mf_value *mask)
1488 {
1489 bool masked;
1490 int len, offset;
1491
1492 len = mf_field_len(mff, value, mask, &masked);
1493 offset = mff->n_bytes - len;
1494
1495 nxm_put_entry_raw(b, mff->id, version,
1496 &value->u8 + offset, masked ? &mask->u8 + offset : NULL,
1497 len);
1498 }
1499 \f
1500 /* nx_match_to_string() and helpers. */
1501
1502 static void format_nxm_field_name(struct ds *, uint64_t header);
1503
/* Returns a malloc()'d, human-readable string for the nx_match in the
 * 'match_len' bytes at 'p', e.g. "NXM_OF_ETH_TYPE(0800), ...".  Each value
 * (and mask, for masked entries) is printed as raw hex.  The caller must
 * free the returned string. */
char *
nx_match_to_string(const uint8_t *p, unsigned int match_len)
{
    if (!match_len) {
        return xstrdup("<any>");
    }

    struct ofpbuf b = ofpbuf_const_initializer(p, match_len);
    struct ds s = DS_EMPTY_INITIALIZER;
    /* Pull one TLV per iteration; stop at the first malformed entry. */
    while (b.size) {
        union mf_value value;
        union mf_value mask;
        enum ofperr error;
        uint64_t header;
        int value_len;

        error = nx_pull_entry__(&b, true, NULL, &header, NULL, &value, &mask, false);
        if (error) {
            break;
        }
        value_len = MIN(sizeof value, nxm_field_bytes(header));

        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        format_nxm_field_name(&s, header);
        ds_put_char(&s, '(');

        for (int i = 0; i < value_len; i++) {
            ds_put_format(&s, "%02x", ((const uint8_t *) &value)[i]);
        }
        if (nxm_hasmask(header)) {
            ds_put_char(&s, '/');
            for (int i = 0; i < value_len; i++) {
                ds_put_format(&s, "%02x", ((const uint8_t *) &mask)[i]);
            }
        }
        ds_put_char(&s, ')');
    }

    /* Anything left over is the malformed tail. */
    if (b.size) {
        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        ds_put_format(&s, "<%u invalid bytes>", b.size);
    }

    return ds_steal_cstr(&s);
}
1555
1556 char *
1557 oxm_match_to_string(const struct ofpbuf *p, unsigned int match_len)
1558 {
1559 const struct ofp11_match_header *omh = p->data;
1560 uint16_t match_len_;
1561 struct ds s;
1562
1563 ds_init(&s);
1564
1565 if (match_len < sizeof *omh) {
1566 ds_put_format(&s, "<match too short: %u>", match_len);
1567 goto err;
1568 }
1569
1570 if (omh->type != htons(OFPMT_OXM)) {
1571 ds_put_format(&s, "<bad match type field: %u>", ntohs(omh->type));
1572 goto err;
1573 }
1574
1575 match_len_ = ntohs(omh->length);
1576 if (match_len_ < sizeof *omh) {
1577 ds_put_format(&s, "<match length field too short: %u>", match_len_);
1578 goto err;
1579 }
1580
1581 if (match_len_ != match_len) {
1582 ds_put_format(&s, "<match length field incorrect: %u != %u>",
1583 match_len_, match_len);
1584 goto err;
1585 }
1586
1587 return nx_match_to_string(ofpbuf_at(p, sizeof *omh, 0),
1588 match_len - sizeof *omh);
1589
1590 err:
1591 return ds_steal_cstr(&s);
1592 }
1593
1594 void
1595 nx_format_field_name(enum mf_field_id id, enum ofp_version version,
1596 struct ds *s)
1597 {
1598 format_nxm_field_name(s, mf_oxm_header(id, version));
1599 }
1600
1601 static void
1602 format_nxm_field_name(struct ds *s, uint64_t header)
1603 {
1604 const struct nxm_field *f = nxm_field_by_header(header, false, NULL);
1605 if (f) {
1606 ds_put_cstr(s, f->name);
1607 if (nxm_hasmask(header)) {
1608 ds_put_cstr(s, "_W");
1609 }
1610 } else if (header == NXM_NX_COOKIE) {
1611 ds_put_cstr(s, "NXM_NX_COOKIE");
1612 } else if (header == NXM_NX_COOKIE_W) {
1613 ds_put_cstr(s, "NXM_NX_COOKIE_W");
1614 } else {
1615 ds_put_format(s, "%d:%d", nxm_class(header), nxm_field(header));
1616 }
1617 }
1618
/* Returns true if the 'a_len' bytes at 'a' equal the NUL-terminated string
 * 'b' exactly (same length, same bytes). */
static bool
streq_len(const char *a, size_t a_len, const char *b)
{
    size_t b_len = strlen(b);

    return b_len == a_len && memcmp(a, b, a_len) == 0;
}
1624
/* Parses the 'name_len' bytes at 'name' as an NXM/OXM field name, a cookie
 * name, or a raw hexadecimal header, returning the corresponding 64-bit
 * header, or 0 if the name is not recognized. */
static uint64_t
parse_nxm_field_name(const char *name, int name_len)
{
    const struct nxm_field *f;
    bool wild;

    /* Known field name, possibly with a "_W" (wildcarded) suffix.  The
     * masked form is only accepted for maskable fields. */
    f = mf_parse_subfield_name(name, name_len, &wild);
    if (f) {
        if (!wild) {
            return f->header;
        } else if (mf_from_id(f->id)->maskable != MFM_NONE) {
            return nxm_make_wild_header(f->header);
        }
    }

    if (streq_len(name, name_len, "NXM_NX_COOKIE")) {
        return NXM_NX_COOKIE;
    } else if (streq_len(name, name_len, "NXM_NX_COOKIE_W")) {
        return NXM_NX_COOKIE_W;
    }

    /* Check whether it's a field header value as hex.
     * (This isn't ordinarily useful except for testing error behavior.) */
    if (name_len == 8) {
        /* 8 hex digits: a standard 32-bit header, stored in the high half
         * of the 64-bit internal representation. */
        uint64_t header;
        bool ok;

        header = hexits_value(name, name_len, &ok) << 32;
        if (ok) {
            return header;
        }
    } else if (name_len == 16) {
        /* 16 hex digits: a full 64-bit experimenter header. */
        uint64_t header;
        bool ok;

        header = hexits_value(name, name_len, &ok);
        if (ok && is_experimenter_oxm(header)) {
            return header;
        }
    }

    return 0;
}
1668 \f
1669 /* nx_match_from_string(). */
1670
/* Parses 's', a string in the format produced by nx_match_to_string()
 * ("FIELD(hex)", "FIELD(hex/hex)", comma-separated, or "<any>"), appending
 * the binary nx_match to 'b'.  Exits fatally on any parse error (this is
 * test/utility code).  Returns the number of bytes appended. */
static int
nx_match_from_string_raw(const char *s, struct ofpbuf *b)
{
    const char *full_s = s;
    const size_t start_len = b->size;

    if (!strcmp(s, "<any>")) {
        /* Ensure that 'b->data' isn't actually null. */
        ofpbuf_prealloc_tailroom(b, 1);
        return 0;
    }

    /* One "NAME(VALUE[/MASK])" entry per iteration. */
    for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
        const char *name;
        uint64_t header;
        ovs_be64 nw_header;
        int name_len;
        size_t n;

        name = s;
        name_len = strcspn(s, "(");
        if (s[name_len] != '(') {
            ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
        }

        header = parse_nxm_field_name(name, name_len);
        if (!header) {
            ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
        }

        s += name_len + 1;

        /* Reserve room for the header now; it is written at the end since a
         * variable-length field may change the header's length. */
        b->header = ofpbuf_put_uninit(b, nxm_header_len(header));
        s = ofpbuf_put_hex(b, s, &n);
        if (n != nxm_field_bytes(header)) {
            const struct mf_field *field = mf_from_oxm_header(header, NULL, false, NULL);

            if (field && field->variable_len) {
                if (n <= field->n_bytes) {
                    /* Shorter-than-maximum value of a variable-length field:
                     * rewrite the header with the actual payload length. */
                    int len = (nxm_hasmask(header) ? n * 2 : n) +
                              nxm_experimenter_len(header);

                    header = NXM_HEADER(nxm_vendor(header), nxm_class(header),
                                        nxm_field(header),
                                        nxm_hasmask(header) ? 1 : 0, len);
                } else {
                    ovs_fatal(0, "expected to read at most %d bytes but got "
                              "%"PRIuSIZE, field->n_bytes, n);
                }
            } else {
                ovs_fatal(0, "expected to read %d bytes but got %"PRIuSIZE,
                          nxm_field_bytes(header), n);
            }
        }
        nw_header = htonll(header);
        memcpy(b->header, &nw_header, nxm_header_len(header));

        if (nxm_hasmask(header)) {
            s += strspn(s, " ");
            if (*s != '/') {
                ovs_fatal(0, "%s: missing / in masked field %.*s",
                          full_s, name_len, name);
            }
            s = ofpbuf_put_hex(b, s + 1, &n);
            if (n != nxm_field_bytes(header)) {
                ovs_fatal(0, "%.2s: hex digits expected", s);
            }
        }

        s += strspn(s, " ");
        if (*s != ')') {
            ovs_fatal(0, "%s: missing ) following field %.*s",
                      full_s, name_len, name);
        }
        s++;
    }

    return b->size - start_len;
}
1750
/* Parses 's' as an nx_match into 'b' and pads the result with zero bytes to
 * a multiple of 8.  Returns the unpadded match length. */
int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    int len = nx_match_from_string_raw(s, b);

    ofpbuf_put_zeros(b, PAD_SIZE(len, 8));
    return len;
}
1758
1759 int
1760 oxm_match_from_string(const char *s, struct ofpbuf *b)
1761 {
1762 int match_len;
1763 struct ofp11_match_header *omh;
1764 size_t start_len = b->size;
1765
1766 ofpbuf_put_uninit(b, sizeof *omh);
1767 match_len = nx_match_from_string_raw(s, b) + sizeof *omh;
1768 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1769
1770 omh = ofpbuf_at(b, start_len, sizeof *omh);
1771 omh->type = htons(OFPMT_OXM);
1772 omh->length = htons(match_len);
1773
1774 return match_len;
1775 }
1776 \f
1777 /* Parses 's' as a "move" action, in the form described in ovs-actions(7), into
1778 * '*move'.
1779 *
1780 * Returns NULL if successful, otherwise a malloc()'d string describing the
1781 * error. The caller is responsible for freeing the returned string. */
1782 char * OVS_WARN_UNUSED_RESULT
1783 nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
1784 {
1785 const char *full_s = s;
1786 char *error;
1787
1788 error = mf_parse_subfield__(&move->src, &s);
1789 if (error) {
1790 return error;
1791 }
1792 if (strncmp(s, "->", 2)) {
1793 return xasprintf("%s: missing `->' following source", full_s);
1794 }
1795 s += 2;
1796 error = mf_parse_subfield(&move->dst, s);
1797 if (error) {
1798 return error;
1799 }
1800
1801 if (move->src.n_bits != move->dst.n_bits) {
1802 return xasprintf("%s: source field is %d bits wide but destination is "
1803 "%d bits wide", full_s,
1804 move->src.n_bits, move->dst.n_bits);
1805 }
1806 return NULL;
1807 }
1808 \f
1809 /* nxm_format_reg_move(). */
1810
/* Appends to 's' a "move:SRC->DST" representation of 'move', with color
 * escapes around the punctuation. */
void
nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
{
    ds_put_format(s, "%smove:%s", colors.special, colors.end);
    mf_format_subfield(&move->src, s);
    ds_put_format(s, "%s->%s", colors.special, colors.end);
    mf_format_subfield(&move->dst, s);
}
1819
1820 \f
1821 enum ofperr
1822 nxm_reg_move_check(const struct ofpact_reg_move *move,
1823 const struct match *match)
1824 {
1825 enum ofperr error;
1826
1827 error = mf_check_src(&move->src, match);
1828 if (error) {
1829 return error;
1830 }
1831
1832 return mf_check_dst(&move->dst, match);
1833 }
1834 \f
1835 /* nxm_execute_reg_move(). */
1836
/* Loads the 64-bit value 'src_data' into the 'dst' subfield of 'flow', and
 * marks those destination bits as exact-matched in 'wc'. */
void
nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data,
             struct flow *flow, struct flow_wildcards *wc)
{
    union mf_subvalue src_subvalue;
    union mf_subvalue mask_value;
    ovs_be64 src_data_be = htonll(src_data);

    /* Un-wildcard the destination bits. */
    memset(&mask_value, 0xff, sizeof mask_value);
    mf_write_subfield_flow(dst, &mask_value, &wc->masks);

    /* Copy the 64 source bits into the low end of the subvalue.  The bytes
     * above bit 63 of 'src_subvalue' are left uninitialized, so this
     * presumably relies on dst->n_bits <= 64 -- TODO confirm callers
     * guarantee this. */
    bitwise_copy(&src_data_be, sizeof src_data_be, 0,
                 &src_subvalue, sizeof src_subvalue, 0,
                 sizeof src_data_be * 8);
    mf_write_subfield_flow(dst, &src_subvalue, flow);
}
1853 \f
1854 /* nxm_parse_stack_action, works for both push() and pop(). */
1855
1856 /* Parses 's' as a "push" or "pop" action, in the form described in
1857 * ovs-actions(7), into '*stack_action'.
1858 *
1859 * Returns NULL if successful, otherwise a malloc()'d string describing the
1860 * error. The caller is responsible for freeing the returned string. */
1861 char * OVS_WARN_UNUSED_RESULT
1862 nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s)
1863 {
1864 char *error;
1865
1866 error = mf_parse_subfield__(&stack_action->subfield, &s);
1867 if (error) {
1868 return error;
1869 }
1870
1871 if (*s != '\0') {
1872 return xasprintf("%s: trailing garbage following push or pop", s);
1873 }
1874
1875 return NULL;
1876 }
1877
/* Appends to 's' a "push:SUBFIELD" representation of 'push', with color
 * escapes around the keyword. */
void
nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s)
{
    ds_put_format(s, "%spush:%s", colors.param, colors.end);
    mf_format_subfield(&push->subfield, s);
}
1884
/* Appends to 's' a "pop:SUBFIELD" representation of 'pop', with color
 * escapes around the keyword. */
void
nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s)
{
    ds_put_format(s, "%spop:%s", colors.param, colors.end);
    mf_format_subfield(&pop->subfield, s);
}
1891
/* Verifies that 'push''s subfield is readable within 'match'.  Returns 0 on
 * success, otherwise an OpenFlow error. */
enum ofperr
nxm_stack_push_check(const struct ofpact_stack *push,
                     const struct match *match)
{
    return mf_check_src(&push->subfield, match);
}
1898
/* Verifies that 'pop''s subfield is writable within 'match'.  Returns 0 on
 * success, otherwise an OpenFlow error. */
enum ofperr
nxm_stack_pop_check(const struct ofpact_stack *pop,
                    const struct match *match)
{
    return mf_check_dst(&pop->subfield, match);
}
1905
1906 /* nxm_execute_stack_push(), nxm_execute_stack_pop().
1907 *
1908 * A stack is an ofpbuf with 'data' pointing to the bottom of the stack and
1909 * 'size' indexing the top of the stack. Each value of some byte length is
1910 * stored to the stack immediately followed by the length of the value as an
1911 * unsigned byte. This way a POP operation can first read the length byte, and
1912 * then the appropriate number of bytes from the stack. This also means that
1913 * it is only possible to traverse the stack from top to bottom. It is
1914 * possible, however, to push values also to the bottom of the stack, which is
1915 * useful when a stack has been serialized to a wire format in reverse order
1916 * (topmost value first).
1917 */
1918
/* Pushes the 'bytes'-byte value at 'v' onto the top of 'stack': the value
 * itself, immediately followed by its length byte. */
void
nx_stack_push(struct ofpbuf *stack, const void *v, uint8_t bytes)
{
    uint8_t len = bytes;

    ofpbuf_put(stack, v, len);
    ofpbuf_put(stack, &len, sizeof len);
}
1926
/* Pushes the 'bytes'-byte value at 'v' under the bottom of 'stack',
 * prepending the length byte first so that the final layout at the bottom is
 * value-then-length, just as nx_stack_push() produces at the top. */
void
nx_stack_push_bottom(struct ofpbuf *stack, const void *v, uint8_t bytes)
{
    uint8_t len = bytes;

    ofpbuf_push(stack, &len, sizeof len);
    ofpbuf_push(stack, v, len);
}
1934
/* Pop the topmost value from 'stack', returning a pointer to the value in the
 * stack and the length of the value in '*bytes'.  In case of underflow a NULL
 * is returned and length is returned as zero via '*bytes'. */
void *
nx_stack_pop(struct ofpbuf *stack, uint8_t *bytes)
{
    if (!stack->size) {
        *bytes = 0;
        return NULL;
    }

    /* The length byte sits on top of (after) the value; read it first. */
    stack->size -= sizeof *bytes;
    memcpy(bytes, ofpbuf_tail(stack), sizeof *bytes);

    /* Then shrink past the value itself; the tail now points at it.  The
     * popped bytes remain in the buffer until overwritten. */
    ovs_assert(stack->size >= *bytes);
    stack->size -= *bytes;
    return ofpbuf_tail(stack);
}
1953
/* Reads the subfield described by 'push' from 'flow' and pushes its value
 * onto 'stack'.  Also marks the subfield as an exact match in 'wc' so that
 * the datapath flow depends on the bits that were read. */
void
nxm_execute_stack_push(const struct ofpact_stack *push,
                       const struct flow *flow, struct flow_wildcards *wc,
                       struct ofpbuf *stack)
{
    union mf_subvalue dst_value;

    /* Unwildcard the bits being read. */
    mf_write_subfield_flow(&push->subfield,
                           (union mf_subvalue *)&exact_match_mask,
                           &wc->masks);

    mf_read_subfield(&push->subfield, flow, &dst_value);
    /* mf_subvalue is big-endian: only the last 'bytes' of u8[] are
     * significant for an n_bits-wide field, so push just that tail. */
    uint8_t bytes = DIV_ROUND_UP(push->subfield.n_bits, 8);
    nx_stack_push(stack, &dst_value.u8[sizeof dst_value - bytes], bytes);
}
1969
/* Pops a value from 'stack' and writes it into the subfield described by
 * 'pop' in 'flow', marking the subfield as an exact match in 'wc'.
 * Returns true on success, false if the stack was empty (in which case
 * neither 'flow' nor 'wc' is modified). */
bool
nxm_execute_stack_pop(const struct ofpact_stack *pop,
                      struct flow *flow, struct flow_wildcards *wc,
                      struct ofpbuf *stack)
{
    uint8_t src_bytes;
    const void *src = nx_stack_pop(stack, &src_bytes);
    if (src) {
        union mf_subvalue src_value;
        uint8_t dst_bytes = DIV_ROUND_UP(pop->subfield.n_bits, 8);

        /* Zero-extend when the popped value is narrower than the destination
         * field.  mf_subvalue is big-endian, so the value occupies the tail
         * of u8[] and the zero padding goes just above it. */
        if (src_bytes < dst_bytes) {
            memset(&src_value.u8[sizeof src_value - dst_bytes], 0,
                   dst_bytes - src_bytes);
        }
        memcpy(&src_value.u8[sizeof src_value - src_bytes], src, src_bytes);
        /* If the popped value is wider than the field, the excess high-order
         * bytes are presumably ignored because only n_bits are written --
         * TODO confirm against mf_write_subfield_flow(). */
        mf_write_subfield_flow(&pop->subfield,
                               (union mf_subvalue *)&exact_match_mask,
                               &wc->masks);
        mf_write_subfield_flow(&pop->subfield, &src_value, flow);
        return true;
    } else {
        /* Attempted to pop from an empty stack. */
        return false;
    }
}
1996 \f
1997 /* Parses a field from '*s' into '*field'. If successful, stores the
1998 * reference to the field in '*field', and returns NULL. On failure,
1999 * returns a malloc()'ed error message.
2000 */
2001 char * OVS_WARN_UNUSED_RESULT
2002 mf_parse_field(const struct mf_field **field, const char *s)
2003 {
2004 const struct nxm_field *f;
2005 int s_len = strlen(s);
2006
2007 f = nxm_field_by_name(s, s_len);
2008 (*field) = f ? mf_from_id(f->id) : mf_from_name_len(s, s_len);
2009 if (!*field) {
2010 return xasprintf("unknown field `%s'", s);
2011 }
2012 return NULL;
2013 }
2014 \f
2015 /* Formats 'sf' into 's' in a format normally acceptable to
2016 * mf_parse_subfield(). (It won't be acceptable if sf->field is NULL or if
2017 * sf->field has no NXM name.) */
2018 void
2019 mf_format_subfield(const struct mf_subfield *sf, struct ds *s)
2020 {
2021 if (!sf->field) {
2022 ds_put_cstr(s, "<unknown>");
2023 } else {
2024 const struct nxm_field *f = nxm_field_by_mf_id(sf->field->id, 0);
2025 ds_put_cstr(s, f ? f->name : sf->field->name);
2026 }
2027
2028 if (sf->field && sf->ofs == 0 && sf->n_bits == sf->field->n_bits) {
2029 ds_put_cstr(s, "[]");
2030 } else if (sf->n_bits == 1) {
2031 ds_put_format(s, "[%d]", sf->ofs);
2032 } else {
2033 ds_put_format(s, "[%d..%d]", sf->ofs, sf->ofs + sf->n_bits - 1);
2034 }
2035 }
2036
/* Looks up the NXM field named by the 'name_len' bytes at 'name'.  A "_W"
 * suffix denotes the wildcarded variant; it is stripped before lookup and
 * reported through '*wild'. */
static const struct nxm_field *
mf_parse_subfield_name(const char *name, int name_len, bool *wild)
{
    bool has_mask_suffix = name_len > 2
                           && !memcmp(&name[name_len - 2], "_W", 2);

    *wild = has_mask_suffix;
    return nxm_field_by_name(name,
                             has_mask_suffix ? name_len - 2 : name_len);
}
2047
2048 /* Parses a subfield from the beginning of '*sp' into 'sf'. If successful,
2049 * returns NULL and advances '*sp' to the first byte following the parsed
2050 * string. On failure, returns a malloc()'d error message, does not modify
2051 * '*sp', and does not properly initialize 'sf'.
2052 *
2053 * The syntax parsed from '*sp' takes the form "header[start..end]" where
2054 * 'header' is the name of an NXM field and 'start' and 'end' are (inclusive)
2055 * bit indexes. "..end" may be omitted to indicate a single bit. "start..end"
2056 * may both be omitted (the [] are still required) to indicate an entire
2057 * field. */
2058 char * OVS_WARN_UNUSED_RESULT
2059 mf_parse_subfield__(struct mf_subfield *sf, const char **sp)
2060 {
2061 const struct mf_field *field = NULL;
2062 const struct nxm_field *f;
2063 const char *name;
2064 int start, end;
2065 const char *s;
2066 int name_len;
2067 bool wild;
2068
2069 s = *sp;
2070 name = s;
2071 name_len = strcspn(s, "[-");
2072
2073 f = mf_parse_subfield_name(name, name_len, &wild);
2074 field = f ? mf_from_id(f->id) : mf_from_name_len(name, name_len);
2075 if (!field) {
2076 return xasprintf("%s: unknown field `%.*s'", *sp, name_len, s);
2077 }
2078
2079 s += name_len;
2080 /* Assume full field. */
2081 start = 0;
2082 end = field->n_bits - 1;
2083 if (*s == '[') {
2084 if (!strncmp(s, "[]", 2)) {
2085 /* Nothing to do. */
2086 } else if (ovs_scan(s, "[%d..%d]", &start, &end)) {
2087 /* Nothing to do. */
2088 } else if (ovs_scan(s, "[%d]", &start)) {
2089 end = start;
2090 } else {
2091 return xasprintf("%s: syntax error expecting [] or [<bit>] or "
2092 "[<start>..<end>]", *sp);
2093 }
2094 s = strchr(s, ']') + 1;
2095 }
2096
2097 if (start > end) {
2098 return xasprintf("%s: starting bit %d is after ending bit %d",
2099 *sp, start, end);
2100 } else if (start >= field->n_bits) {
2101 return xasprintf("%s: starting bit %d is not valid because field is "
2102 "only %d bits wide", *sp, start, field->n_bits);
2103 } else if (end >= field->n_bits){
2104 return xasprintf("%s: ending bit %d is not valid because field is "
2105 "only %d bits wide", *sp, end, field->n_bits);
2106 }
2107
2108 sf->field = field;
2109 sf->ofs = start;
2110 sf->n_bits = end - start + 1;
2111
2112 *sp = s;
2113 return NULL;
2114 }
2115
2116 /* Parses a subfield from the entirety of 's' into 'sf'. Returns NULL if
2117 * successful, otherwise a malloc()'d string describing the error. The caller
2118 * is responsible for freeing the returned string.
2119 *
2120 * The syntax parsed from 's' takes the form "header[start..end]" where
2121 * 'header' is the name of an NXM field and 'start' and 'end' are (inclusive)
2122 * bit indexes. "..end" may be omitted to indicate a single bit. "start..end"
2123 * may both be omitted (the [] are still required) to indicate an entire
2124 * field. */
2125 char * OVS_WARN_UNUSED_RESULT
2126 mf_parse_subfield(struct mf_subfield *sf, const char *s)
2127 {
2128 char *error = mf_parse_subfield__(sf, &s);
2129 if (!error && s[0]) {
2130 error = xstrdup("unexpected input following field syntax");
2131 }
2132 return error;
2133 }
2134 \f
2135 /* Returns an bitmap in which each bit corresponds to the like-numbered field
2136 * in the OFPXMC12_OPENFLOW_BASIC OXM class, in which the bit values are taken
2137 * from the 'fields' bitmap. Only fields defined in OpenFlow 'version' are
2138 * considered.
2139 *
2140 * This is useful for encoding OpenFlow 1.2 table stats messages. */
2141 ovs_be64
2142 oxm_bitmap_from_mf_bitmap(const struct mf_bitmap *fields,
2143 enum ofp_version version)
2144 {
2145 uint64_t oxm_bitmap = 0;
2146 int i;
2147
2148 BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->bm) {
2149 uint64_t oxm = mf_oxm_header(i, version);
2150 uint32_t class = nxm_class(oxm);
2151 int field = nxm_field(oxm);
2152
2153 if (class == OFPXMC12_OPENFLOW_BASIC && field < 64) {
2154 oxm_bitmap |= UINT64_C(1) << field;
2155 }
2156 }
2157 return htonll(oxm_bitmap);
2158 }
2159
2160 /* Opposite conversion from oxm_bitmap_from_mf_bitmap().
2161 *
2162 * This is useful for decoding OpenFlow 1.2 table stats messages. */
2163 struct mf_bitmap
2164 oxm_bitmap_to_mf_bitmap(ovs_be64 oxm_bitmap, enum ofp_version version)
2165 {
2166 struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
2167
2168 for (enum mf_field_id id = 0; id < MFF_N_IDS; id++) {
2169 uint64_t oxm = mf_oxm_header(id, version);
2170 if (oxm && version >= nxm_field_by_header(oxm, false, NULL)->version) {
2171 uint32_t class = nxm_class(oxm);
2172 int field = nxm_field(oxm);
2173
2174 if (class == OFPXMC12_OPENFLOW_BASIC
2175 && field < 64
2176 && oxm_bitmap & htonll(UINT64_C(1) << field)) {
2177 bitmap_set1(fields.bm, id);
2178 }
2179 }
2180 }
2181 return fields;
2182 }
2183
2184 /* Returns a bitmap of fields that can be encoded in OXM and that can be
2185 * modified with a "set_field" action. */
2186 struct mf_bitmap
2187 oxm_writable_fields(void)
2188 {
2189 struct mf_bitmap b = MF_BITMAP_INITIALIZER;
2190 int i;
2191
2192 for (i = 0; i < MFF_N_IDS; i++) {
2193 if (mf_oxm_header(i, 0) && mf_from_id(i)->writable) {
2194 bitmap_set1(b.bm, i);
2195 }
2196 }
2197 return b;
2198 }
2199
2200 /* Returns a bitmap of fields that can be encoded in OXM and that can be
2201 * matched in a flow table. */
2202 struct mf_bitmap
2203 oxm_matchable_fields(void)
2204 {
2205 struct mf_bitmap b = MF_BITMAP_INITIALIZER;
2206 int i;
2207
2208 for (i = 0; i < MFF_N_IDS; i++) {
2209 if (mf_oxm_header(i, 0)) {
2210 bitmap_set1(b.bm, i);
2211 }
2212 }
2213 return b;
2214 }
2215
2216 /* Returns a bitmap of fields that can be encoded in OXM and that can be
2217 * matched in a flow table with an arbitrary bitmask. */
2218 struct mf_bitmap
2219 oxm_maskable_fields(void)
2220 {
2221 struct mf_bitmap b = MF_BITMAP_INITIALIZER;
2222 int i;
2223
2224 for (i = 0; i < MFF_N_IDS; i++) {
2225 if (mf_oxm_header(i, 0) && mf_from_id(i)->maskable == MFM_FULLY) {
2226 bitmap_set1(b.bm, i);
2227 }
2228 }
2229 return b;
2230 }
2231 \f
/* One entry of the generated all_nxm_fields[] table (see nx-match.inc).
 * Each entry indexes a single nxm_field three ways: by OXM/NXM header, by
 * name, and by mf_field_id. */
struct nxm_field_index {
    struct hmap_node header_node; /* In nxm_header_map. */
    struct hmap_node name_node; /* In nxm_name_map. */
    struct ovs_list mf_node; /* In nxm_mf_map[nf.id]. */
    const struct nxm_field nf; /* The field this entry describes. */
};
2238
2239 #include "nx-match.inc"
2240
/* Lookup tables over all_nxm_fields[], built lazily by nxm_init():
 * keyed by header (length stripped), by name, and by mf_field_id
 * respectively. */
static struct hmap nxm_header_map;
static struct hmap nxm_name_map;
static struct ovs_list nxm_mf_map[MFF_N_IDS];
2244
/* One-time initialization of the NXM lookup tables above.  Safe to call from
 * multiple threads; only the first caller performs the work (ovsthread_once),
 * later callers wait until it completes. */
static void
nxm_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    if (ovsthread_once_start(&once)) {
        hmap_init(&nxm_header_map);
        hmap_init(&nxm_name_map);
        for (int i = 0; i < MFF_N_IDS; i++) {
            ovs_list_init(&nxm_mf_map[i]);
        }
        /* Index every generated field entry by header (sans length) and by
         * name.  Several nxm_fields can map to one mf_field_id, hence the
         * per-id list rather than a single pointer. */
        for (struct nxm_field_index *nfi = all_nxm_fields;
             nfi < &all_nxm_fields[ARRAY_SIZE(all_nxm_fields)]; nfi++) {
            hmap_insert(&nxm_header_map, &nfi->header_node,
                        hash_uint64(nxm_no_len(nfi->nf.header)));
            hmap_insert(&nxm_name_map, &nfi->name_node,
                        hash_string(nfi->nf.name, 0));
            ovs_list_push_back(&nxm_mf_map[nfi->nf.id], &nfi->mf_node);
        }
        ovsthread_once_done(&once);
    }
}
2266
2267
/* Looks up the nxm_field for OXM/NXM 'header', ignoring the hasmask bit.
 * Returns NULL if the header is unknown, or if its length disagrees with the
 * table entry for a field that is not variable-length.
 *
 * If 'is_action' is true, 'header' came from a set-field (or similar) action
 * and a length mismatch is additionally reported through '*h_error' (when
 * non-NULL) as OFPERR_OFPBAC_BAD_SET_LEN. */
static const struct nxm_field *
nxm_field_by_header(uint64_t header, bool is_action, enum ofperr *h_error)
{
    const struct nxm_field_index *nfi;
    uint64_t header_no_len;

    nxm_init();
    if (nxm_hasmask(header)) {
        /* Masked and exact headers share one table entry; normalize. */
        header = nxm_make_exact_header(header);
    }

    header_no_len = nxm_no_len(header);

    HMAP_FOR_EACH_IN_BUCKET (nfi, header_node, hash_uint64(header_no_len),
                             &nxm_header_map) {
        /* NOTE(review): this length check runs for every entry in the hash
         * bucket, including entries that merely collide with 'header', so
         * '*h_error' may be set by a colliding entry even when the actual
         * match below succeeds with equal lengths -- confirm this is
         * intended. */
        if (is_action && nxm_length(header) > 0) {
            if (nxm_length(header) != nxm_length(nfi->nf.header) && h_error ) {
                *h_error = OFPERR_OFPBAC_BAD_SET_LEN;
            }
        }
        if (header_no_len == nxm_no_len(nfi->nf.header)) {
            /* For variable-length fields any length is acceptable;
             * otherwise the lengths must agree exactly. */
            if (nxm_length(header) == nxm_length(nfi->nf.header) ||
                mf_from_id(nfi->nf.id)->variable_len) {
                return &nfi->nf;
            } else {
                return NULL;
            }
        }
    }
    return NULL;
}
2299
2300 static const struct nxm_field *
2301 nxm_field_by_name(const char *name, size_t len)
2302 {
2303 const struct nxm_field_index *nfi;
2304
2305 nxm_init();
2306 HMAP_FOR_EACH_WITH_HASH (nfi, name_node, hash_bytes(name, len, 0),
2307 &nxm_name_map) {
2308 if (strlen(nfi->nf.name) == len && !memcmp(nfi->nf.name, name, len)) {
2309 return &nfi->nf;
2310 }
2311 }
2312 return NULL;
2313 }
2314
2315 static const struct nxm_field *
2316 nxm_field_by_mf_id(enum mf_field_id id, enum ofp_version version)
2317 {
2318 const struct nxm_field_index *nfi;
2319 const struct nxm_field *f;
2320
2321 nxm_init();
2322
2323 f = NULL;
2324 LIST_FOR_EACH (nfi, mf_node, &nxm_mf_map[id]) {
2325 if (!f || version >= nfi->nf.version) {
2326 f = &nfi->nf;
2327 }
2328 }
2329 return f;
2330 }