1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "nx-match.h"
20
21 #include <netinet/icmp6.h>
22
23 #include "classifier.h"
24 #include "dynamic-string.h"
25 #include "meta-flow.h"
26 #include "ofp-actions.h"
27 #include "ofp-errors.h"
28 #include "ofp-util.h"
29 #include "ofpbuf.h"
30 #include "openflow/nicira-ext.h"
31 #include "packets.h"
32 #include "unaligned.h"
33 #include "util.h"
34 #include "vlog.h"
35
36 VLOG_DEFINE_THIS_MODULE(nx_match);
37
38 /* Rate limit for nx_match parse errors. These always indicate a bug in the
39 * peer and so there's not much point in showing a lot of them. */
40 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
41
42 /* Returns the width of the data for a field with the given 'header', in
43 * bytes. */
44 int
45 nxm_field_bytes(uint32_t header)
46 {
47 unsigned int length = NXM_LENGTH(header);
48 return NXM_HASMASK(header) ? length / 2 : length;
49 }
50
51 /* Returns the width of the data for a field with the given 'header', in
52 * bits. */
53 int
54 nxm_field_bits(uint32_t header)
55 {
56 return nxm_field_bytes(header) * 8;
57 }
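
/* Illustrative sketch (not part of the library): for a masked header such as
 * NXM_OF_ETH_SRC_W, NXM_LENGTH() covers the value and the mask together, so
 * the helpers above report only the value's own width:
 *
 *     int value_bytes = nxm_field_bytes(NXM_OF_ETH_SRC_W);   (12 / 2 == 6)
 *     int value_bits  = nxm_field_bits(NXM_OF_ETH_SRC_W);    (6 * 8 == 48)
 */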
58 \f
59 /* nx_pull_match() and helpers. */
60
61 static uint32_t
62 nx_entry_ok(const void *p, unsigned int match_len)
63 {
64 unsigned int payload_len;
65 ovs_be32 header_be;
66 uint32_t header;
67
68 if (match_len < 4) {
69 if (match_len) {
70 VLOG_DBG_RL(&rl, "nx_match ends with partial (%u-byte) nxm_header",
71 match_len);
72 }
73 return 0;
74 }
75 memcpy(&header_be, p, 4);
76 header = ntohl(header_be);
77
78 payload_len = NXM_LENGTH(header);
79 if (!payload_len) {
80 VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
81 "length 0", header);
82 return 0;
83 }
84 if (match_len < payload_len + 4) {
85 VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
86 "%u bytes left in nx_match", payload_len + 4, match_len);
87 return 0;
88 }
89
90 return header;
91 }
92
93 /* Given a pointer 'p' to a masked NXM/OXM entry for field 'mf', checks for
94 * any 1-bit in the entry's value where there is a 0-bit in its mask. If it
95 * finds one, logs a warning. */
96 static void
97 check_mask_consistency(const uint8_t *p, const struct mf_field *mf)
98 {
99 unsigned int width = mf->n_bytes;
100 const uint8_t *value = p + 4;
101 const uint8_t *mask = p + 4 + width;
102 unsigned int i;
103
104 for (i = 0; i < width; i++) {
105 if (value[i] & ~mask[i]) {
106 if (!VLOG_DROP_WARN(&rl)) {
107 char *s = nx_match_to_string(p, width * 2 + 4);
108 VLOG_WARN_RL(&rl, "NXM/OXM entry %s has 1-bits in value for "
109 "bits wildcarded by the mask. (Future versions "
110 "of OVS may report this as an OpenFlow error.)", s);
111 free(s);
112 break;
113 }
114 }
115 }
116 }
117
118 static enum ofperr
119 nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
120 struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask)
121 {
122 uint32_t header;
123
124 ovs_assert((cookie != NULL) == (cookie_mask != NULL));
125
126 match_init_catchall(match);
127 if (cookie) {
128 *cookie = *cookie_mask = htonll(0);
129 }
130 if (!match_len) {
131 return 0;
132 }
133
134 for (;
135 (header = nx_entry_ok(p, match_len)) != 0;
136 p += 4 + NXM_LENGTH(header), match_len -= 4 + NXM_LENGTH(header)) {
137 const struct mf_field *mf;
138 enum ofperr error;
139
140 mf = mf_from_nxm_header(header);
141 if (!mf) {
142 if (strict) {
143 error = OFPERR_OFPBMC_BAD_FIELD;
144 } else {
145 continue;
146 }
147 } else if (!mf_are_prereqs_ok(mf, &match->flow)) {
148 error = OFPERR_OFPBMC_BAD_PREREQ;
149 } else if (!mf_is_all_wild(mf, &match->wc)) {
150 error = OFPERR_OFPBMC_DUP_FIELD;
151 } else {
152 unsigned int width = mf->n_bytes;
153 union mf_value value;
154
155 memcpy(&value, p + 4, width);
156 if (!mf_is_value_valid(mf, &value)) {
157 error = OFPERR_OFPBMC_BAD_VALUE;
158 } else if (!NXM_HASMASK(header)) {
159 error = 0;
160 mf_set_value(mf, &value, match);
161 } else {
162 union mf_value mask;
163
164 memcpy(&mask, p + 4 + width, width);
165 if (!mf_is_mask_valid(mf, &mask)) {
166 error = OFPERR_OFPBMC_BAD_MASK;
167 } else {
168 error = 0;
169 check_mask_consistency(p, mf);
170 mf_set(mf, &value, &mask, match);
171 }
172 }
173 }
174
175 /* Check if the match is for a cookie rather than a classifier rule. */
176 if ((header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W) && cookie) {
177 if (*cookie_mask) {
178 error = OFPERR_OFPBMC_DUP_FIELD;
179 } else {
180 unsigned int width = sizeof *cookie;
181
182 memcpy(cookie, p + 4, width);
183 if (NXM_HASMASK(header)) {
184 memcpy(cookie_mask, p + 4 + width, width);
185 } else {
186 *cookie_mask = OVS_BE64_MAX;
187 }
188 error = 0;
189 }
190 }
191
192 if (error) {
193 VLOG_DBG_RL(&rl, "bad nxm_entry %#08"PRIx32" (vendor=%"PRIu32", "
194 "field=%"PRIu32", hasmask=%"PRIu32", len=%"PRIu32"), "
195 "(%s)", header,
196 NXM_VENDOR(header), NXM_FIELD(header),
197 NXM_HASMASK(header), NXM_LENGTH(header),
198 ofperr_to_string(error));
199 return error;
200 }
201 }
202
203 return match_len ? OFPERR_OFPBMC_BAD_LEN : 0;
204 }
205
206 static enum ofperr
207 nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
208 struct match *match,
209 ovs_be64 *cookie, ovs_be64 *cookie_mask)
210 {
211 uint8_t *p = NULL;
212
213 if (match_len) {
214 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
215 if (!p) {
216 VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
217 "multiple of 8, is longer than space in message (max "
218 "length %"PRIu32")", match_len, ofpbuf_size(b));
219 return OFPERR_OFPBMC_BAD_LEN;
220 }
221 }
222
223 return nx_pull_raw(p, match_len, strict, match, cookie, cookie_mask);
224 }
225
226 /* Parses the nx_match formatted match description in 'b' with length
227 * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
228 * are valid pointers, then stores the cookie and mask in them if 'b' contains
229 * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
230 *
231 * Fails with an error upon encountering an unknown NXM header.
232 *
233 * Returns 0 if successful, otherwise an OpenFlow error code. */
234 enum ofperr
235 nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match,
236 ovs_be64 *cookie, ovs_be64 *cookie_mask)
237 {
238 return nx_pull_match__(b, match_len, true, match, cookie, cookie_mask);
239 }
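
/* A minimal usage sketch, assuming the caller has already pulled the OpenFlow
 * and Nicira headers from 'b' so that 'b' now begins with the nx_match, and
 * that 'match_len' came from the enclosing message:
 *
 *     struct match match;
 *     ovs_be64 cookie, cookie_mask;
 *     enum ofperr error;
 *
 *     error = nx_pull_match(b, match_len, &match, &cookie, &cookie_mask);
 *     if (error) {
 *         return error;
 *     }
 *
 * On success, 'match', 'cookie', and 'cookie_mask' are ready to use. */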
240
241 /* Behaves the same as nx_pull_match(), but skips over unknown NXM headers
242 * instead of failing with an error. */
243 enum ofperr
244 nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
245 struct match *match,
246 ovs_be64 *cookie, ovs_be64 *cookie_mask)
247 {
248 return nx_pull_match__(b, match_len, false, match, cookie, cookie_mask);
249 }
250
251 static enum ofperr
252 oxm_pull_match__(struct ofpbuf *b, bool strict, struct match *match)
253 {
254 struct ofp11_match_header *omh = ofpbuf_data(b);
255 uint8_t *p;
256 uint16_t match_len;
257
258 if (ofpbuf_size(b) < sizeof *omh) {
259 return OFPERR_OFPBMC_BAD_LEN;
260 }
261
262 match_len = ntohs(omh->length);
263 if (match_len < sizeof *omh) {
264 return OFPERR_OFPBMC_BAD_LEN;
265 }
266
267 if (omh->type != htons(OFPMT_OXM)) {
268 return OFPERR_OFPBMC_BAD_TYPE;
269 }
270
271 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
272 if (!p) {
273 VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
274 "multiple of 8, is longer than space in message (max "
275 "length %"PRIu32")", match_len, ofpbuf_size(b));
276 return OFPERR_OFPBMC_BAD_LEN;
277 }
278
279 return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
280 strict, match, NULL, NULL);
281 }
282
283 /* Parses the oxm formatted match description preceded by a struct
284 * ofp11_match_header in 'b'. Stores the result in 'match'.
285 *
286 * Fails with an error when encountering unknown OXM headers.
287 *
288 * Returns 0 if successful, otherwise an OpenFlow error code. */
289 enum ofperr
290 oxm_pull_match(struct ofpbuf *b, struct match *match)
291 {
292 return oxm_pull_match__(b, true, match);
293 }
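
/* Sketch of typical use, assuming 'b' is positioned at the ofp11_match_header
 * of an OpenFlow 1.2+ message body:
 *
 *     struct match match;
 *     enum ofperr error = oxm_pull_match(b, &match);
 *
 * Unlike nx_pull_match(), the length is taken from the ofp11_match_header
 * itself, and OXM has no NXM_NX_COOKIE, so no cookie pointers are needed. */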
294
295 /* Behaves the same as oxm_pull_match(), but skips over unknown OXM headers
296 * instead of failing with an error when they are encountered. */
297 enum ofperr
298 oxm_pull_match_loose(struct ofpbuf *b, struct match *match)
299 {
300 return oxm_pull_match__(b, false, match);
301 }
302 \f
303 /* nx_put_match() and helpers.
304 *
305 * 'put' functions whose names end in 'w' add a wildcarded field.
306 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
307 * Other 'put' functions add exact-match fields.
308 */
309
310 static void
311 nxm_put_header(struct ofpbuf *b, uint32_t header)
312 {
313 ovs_be32 n_header = htonl(header);
314 ofpbuf_put(b, &n_header, sizeof n_header);
315 }
316
317 static void
318 nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
319 {
320 nxm_put_header(b, header);
321 ofpbuf_put(b, &value, sizeof value);
322 }
323
324 static void
325 nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask)
326 {
327 switch (mask) {
328 case 0:
329 break;
330
331 case UINT8_MAX:
332 nxm_put_8(b, header, value);
333 break;
334
335 default:
336 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
337 ofpbuf_put(b, &value, sizeof value);
338 ofpbuf_put(b, &mask, sizeof mask);
339 }
340 }
341
342 static void
343 nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
344 {
345 nxm_put_header(b, header);
346 ofpbuf_put(b, &value, sizeof value);
347 }
348
349 static void
350 nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
351 {
352 nxm_put_header(b, header);
353 ofpbuf_put(b, &value, sizeof value);
354 ofpbuf_put(b, &mask, sizeof mask);
355 }
356
357 static void
358 nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
359 {
360 switch (mask) {
361 case 0:
362 break;
363
364 case OVS_BE16_MAX:
365 nxm_put_16(b, header, value);
366 break;
367
368 default:
369 nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
370 break;
371 }
372 }
373
374 static void
375 nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
376 {
377 nxm_put_header(b, header);
378 ofpbuf_put(b, &value, sizeof value);
379 }
380
381 static void
382 nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
383 {
384 nxm_put_header(b, header);
385 ofpbuf_put(b, &value, sizeof value);
386 ofpbuf_put(b, &mask, sizeof mask);
387 }
388
389 static void
390 nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
391 {
392 switch (mask) {
393 case 0:
394 break;
395
396 case OVS_BE32_MAX:
397 nxm_put_32(b, header, value);
398 break;
399
400 default:
401 nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
402 break;
403 }
404 }
405
406 static void
407 nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
408 {
409 nxm_put_header(b, header);
410 ofpbuf_put(b, &value, sizeof value);
411 }
412
413 static void
414 nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
415 {
416 nxm_put_header(b, header);
417 ofpbuf_put(b, &value, sizeof value);
418 ofpbuf_put(b, &mask, sizeof mask);
419 }
420
421 static void
422 nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
423 {
424 switch (mask) {
425 case 0:
426 break;
427
428 case OVS_BE64_MAX:
429 nxm_put_64(b, header, value);
430 break;
431
432 default:
433 nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
434 break;
435 }
436 }
437
438 static void
439 nxm_put_eth(struct ofpbuf *b, uint32_t header,
440 const uint8_t value[ETH_ADDR_LEN])
441 {
442 nxm_put_header(b, header);
443 ofpbuf_put(b, value, ETH_ADDR_LEN);
444 }
445
446 static void
447 nxm_put_eth_masked(struct ofpbuf *b, uint32_t header,
448 const uint8_t value[ETH_ADDR_LEN],
449 const uint8_t mask[ETH_ADDR_LEN])
450 {
451 if (!eth_addr_is_zero(mask)) {
452 if (eth_mask_is_exact(mask)) {
453 nxm_put_eth(b, header, value);
454 } else {
455 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
456 ofpbuf_put(b, value, ETH_ADDR_LEN);
457 ofpbuf_put(b, mask, ETH_ADDR_LEN);
458 }
459 }
460 }
461
462 static void
463 nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
464 const struct in6_addr *value, const struct in6_addr *mask)
465 {
466 if (ipv6_mask_is_any(mask)) {
467 return;
468 } else if (ipv6_mask_is_exact(mask)) {
469 nxm_put_header(b, header);
470 ofpbuf_put(b, value, sizeof *value);
471 } else {
472 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
473 ofpbuf_put(b, value, sizeof *value);
474 ofpbuf_put(b, mask, sizeof *mask);
475 }
476 }
477
478 static void
479 nxm_put_frag(struct ofpbuf *b, const struct match *match)
480 {
481 uint8_t nw_frag = match->flow.nw_frag;
482 uint8_t nw_frag_mask = match->wc.masks.nw_frag;
483
484 switch (nw_frag_mask) {
485 case 0:
486 break;
487
488 case FLOW_NW_FRAG_MASK:
489 nxm_put_8(b, NXM_NX_IP_FRAG, nw_frag);
490 break;
491
492 default:
493 nxm_put_8m(b, NXM_NX_IP_FRAG, nw_frag,
494 nw_frag_mask & FLOW_NW_FRAG_MASK);
495 break;
496 }
497 }
498
499 static void
500 nxm_put_ip(struct ofpbuf *b, const struct match *match,
501 uint8_t icmp_proto, uint32_t icmp_type, uint32_t icmp_code,
502 bool oxm)
503 {
504 const struct flow *flow = &match->flow;
505
506 nxm_put_frag(b, match);
507
508 if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
509 if (oxm) {
510 nxm_put_8(b, OXM_OF_IP_DSCP, flow->nw_tos >> 2);
511 } else {
512 nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & IP_DSCP_MASK);
513 }
514 }
515
516 if (match->wc.masks.nw_tos & IP_ECN_MASK) {
517 nxm_put_8(b, oxm ? OXM_OF_IP_ECN : NXM_NX_IP_ECN,
518 flow->nw_tos & IP_ECN_MASK);
519 }
520
521 if (!oxm && match->wc.masks.nw_ttl) {
522 nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl);
523 }
524
525 if (match->wc.masks.nw_proto) {
526 nxm_put_8(b, oxm ? OXM_OF_IP_PROTO : NXM_OF_IP_PROTO, flow->nw_proto);
527
528 if (flow->nw_proto == IPPROTO_TCP) {
529 nxm_put_16m(b, oxm ? OXM_OF_TCP_SRC : NXM_OF_TCP_SRC,
530 flow->tp_src, match->wc.masks.tp_src);
531 nxm_put_16m(b, oxm ? OXM_OF_TCP_DST : NXM_OF_TCP_DST,
532 flow->tp_dst, match->wc.masks.tp_dst);
533 nxm_put_16m(b, NXM_NX_TCP_FLAGS,
534 flow->tcp_flags, match->wc.masks.tcp_flags);
535 } else if (flow->nw_proto == IPPROTO_UDP) {
536 nxm_put_16m(b, oxm ? OXM_OF_UDP_SRC : NXM_OF_UDP_SRC,
537 flow->tp_src, match->wc.masks.tp_src);
538 nxm_put_16m(b, oxm ? OXM_OF_UDP_DST : NXM_OF_UDP_DST,
539 flow->tp_dst, match->wc.masks.tp_dst);
540 } else if (flow->nw_proto == IPPROTO_SCTP) {
541 nxm_put_16m(b, OXM_OF_SCTP_SRC, flow->tp_src,
542 match->wc.masks.tp_src);
543 nxm_put_16m(b, OXM_OF_SCTP_DST, flow->tp_dst,
544 match->wc.masks.tp_dst);
545 } else if (flow->nw_proto == icmp_proto) {
546 if (match->wc.masks.tp_src) {
547 nxm_put_8(b, icmp_type, ntohs(flow->tp_src));
548 }
549 if (match->wc.masks.tp_dst) {
550 nxm_put_8(b, icmp_code, ntohs(flow->tp_dst));
551 }
552 }
553 }
554 }
555
556 /* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and
557 * Flow Stats Request messages, a 'cookie' and 'cookie_mask' may be supplied.
558 * Otherwise, 'cookie_mask' should be zero.
559 *
560 * This function can cause 'b''s data to be reallocated.
561 *
562 * Returns the number of bytes appended to 'b', excluding padding.
563 *
564 * If 'match' is a catch-all rule that matches every packet, then this function
565 * appends nothing to 'b' and returns 0. */
566 static int
567 nx_put_raw(struct ofpbuf *b, bool oxm, const struct match *match,
568 ovs_be64 cookie, ovs_be64 cookie_mask)
569 {
570 const struct flow *flow = &match->flow;
571 const size_t start_len = ofpbuf_size(b);
572 int match_len;
573 int i;
574
575 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 26);
576
577 /* Metadata. */
578 if (match->wc.masks.dp_hash) {
579 if (!oxm) {
580 nxm_put_32m(b, NXM_NX_DP_HASH, htonl(flow->dp_hash),
581 htonl(match->wc.masks.dp_hash));
582 }
583 }
584
585 if (match->wc.masks.recirc_id) {
586 if (!oxm) {
587 nxm_put_32(b, NXM_NX_RECIRC_ID, htonl(flow->recirc_id));
588 }
589 }
590
591 if (match->wc.masks.in_port.ofp_port) {
592 ofp_port_t in_port = flow->in_port.ofp_port;
593 if (oxm) {
594 nxm_put_32(b, OXM_OF_IN_PORT, ofputil_port_to_ofp11(in_port));
595 } else {
596 nxm_put_16(b, NXM_OF_IN_PORT, htons(ofp_to_u16(in_port)));
597 }
598 }
599
600 /* Ethernet. */
601 nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_SRC : NXM_OF_ETH_SRC,
602 flow->dl_src, match->wc.masks.dl_src);
603 nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_DST : NXM_OF_ETH_DST,
604 flow->dl_dst, match->wc.masks.dl_dst);
605 nxm_put_16m(b, oxm ? OXM_OF_ETH_TYPE : NXM_OF_ETH_TYPE,
606 ofputil_dl_type_to_openflow(flow->dl_type),
607 match->wc.masks.dl_type);
608
609 /* 802.1Q. */
610 if (oxm) {
611 ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI);
612 ovs_be16 vid = flow->vlan_tci & VID_CFI_MASK;
613 ovs_be16 mask = match->wc.masks.vlan_tci & VID_CFI_MASK;
614
615 if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
616 nxm_put_16(b, OXM_OF_VLAN_VID, vid);
617 } else if (mask) {
618 nxm_put_16m(b, OXM_OF_VLAN_VID, vid, mask);
619 }
620
621 if (vid && vlan_tci_to_pcp(match->wc.masks.vlan_tci)) {
622 nxm_put_8(b, OXM_OF_VLAN_PCP, vlan_tci_to_pcp(flow->vlan_tci));
623 }
624
625 } else {
626 nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci,
627 match->wc.masks.vlan_tci);
628 }
629
630 /* MPLS. */
631 if (eth_type_mpls(flow->dl_type)) {
632 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK)) {
633 nxm_put_8(b, OXM_OF_MPLS_TC, mpls_lse_to_tc(flow->mpls_lse[0]));
634 }
635
636 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_BOS_MASK)) {
637 nxm_put_8(b, OXM_OF_MPLS_BOS, mpls_lse_to_bos(flow->mpls_lse[0]));
638 }
639
640 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK)) {
641 nxm_put_32(b, OXM_OF_MPLS_LABEL,
642 htonl(mpls_lse_to_label(flow->mpls_lse[0])));
643 }
644 }
645
646 /* L3. */
647 if (flow->dl_type == htons(ETH_TYPE_IP)) {
648 /* IP. */
649 nxm_put_32m(b, oxm ? OXM_OF_IPV4_SRC : NXM_OF_IP_SRC,
650 flow->nw_src, match->wc.masks.nw_src);
651 nxm_put_32m(b, oxm ? OXM_OF_IPV4_DST : NXM_OF_IP_DST,
652 flow->nw_dst, match->wc.masks.nw_dst);
653 nxm_put_ip(b, match, IPPROTO_ICMP,
654 oxm ? OXM_OF_ICMPV4_TYPE : NXM_OF_ICMP_TYPE,
655 oxm ? OXM_OF_ICMPV4_CODE : NXM_OF_ICMP_CODE, oxm);
656 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
657 /* IPv6. */
658 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_SRC : NXM_NX_IPV6_SRC,
659 &flow->ipv6_src, &match->wc.masks.ipv6_src);
660 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_DST : NXM_NX_IPV6_DST,
661 &flow->ipv6_dst, &match->wc.masks.ipv6_dst);
662 nxm_put_ip(b, match, IPPROTO_ICMPV6,
663 oxm ? OXM_OF_ICMPV6_TYPE : NXM_NX_ICMPV6_TYPE,
664 oxm ? OXM_OF_ICMPV6_CODE : NXM_NX_ICMPV6_CODE, oxm);
665
666 nxm_put_32m(b, oxm ? OXM_OF_IPV6_FLABEL : NXM_NX_IPV6_LABEL,
667 flow->ipv6_label, match->wc.masks.ipv6_label);
668
669 if (flow->nw_proto == IPPROTO_ICMPV6
670 && (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
671 flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
672 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_ND_TARGET : NXM_NX_ND_TARGET,
673 &flow->nd_target, &match->wc.masks.nd_target);
674 if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
675 nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_SLL : NXM_NX_ND_SLL,
676 flow->arp_sha, match->wc.masks.arp_sha);
677 }
678 if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
679 nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_TLL : NXM_NX_ND_TLL,
680 flow->arp_tha, match->wc.masks.arp_tha);
681 }
682 }
683 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
684 flow->dl_type == htons(ETH_TYPE_RARP)) {
685 /* ARP. */
686 if (match->wc.masks.nw_proto) {
687 nxm_put_16(b, oxm ? OXM_OF_ARP_OP : NXM_OF_ARP_OP,
688 htons(flow->nw_proto));
689 }
690 nxm_put_32m(b, oxm ? OXM_OF_ARP_SPA : NXM_OF_ARP_SPA,
691 flow->nw_src, match->wc.masks.nw_src);
692 nxm_put_32m(b, oxm ? OXM_OF_ARP_TPA : NXM_OF_ARP_TPA,
693 flow->nw_dst, match->wc.masks.nw_dst);
694 nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_SHA : NXM_NX_ARP_SHA,
695 flow->arp_sha, match->wc.masks.arp_sha);
696 nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_THA : NXM_NX_ARP_THA,
697 flow->arp_tha, match->wc.masks.arp_tha);
698 }
699
700 /* Tunnel ID. */
701 nxm_put_64m(b, oxm ? OXM_OF_TUNNEL_ID : NXM_NX_TUN_ID,
702 flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);
703
704 /* Other tunnel metadata. */
705 nxm_put_32m(b, NXM_NX_TUN_IPV4_SRC,
706 flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src);
707 nxm_put_32m(b, NXM_NX_TUN_IPV4_DST,
708 flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst);
709
710 /* Registers. */
711 for (i = 0; i < FLOW_N_REGS; i++) {
712 nxm_put_32m(b, NXM_NX_REG(i),
713 htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
714 }
715
716 /* Mark. */
717 nxm_put_32m(b, NXM_NX_PKT_MARK, htonl(flow->pkt_mark),
718 htonl(match->wc.masks.pkt_mark));
719
720 /* OpenFlow 1.1+ Metadata. */
721 nxm_put_64m(b, OXM_OF_METADATA, flow->metadata, match->wc.masks.metadata);
722
723 /* Cookie. */
724 nxm_put_64m(b, NXM_NX_COOKIE, cookie, cookie_mask);
725
726 match_len = ofpbuf_size(b) - start_len;
727 return match_len;
728 }
729
730 /* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
731 * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
732 * Stats Request messages, a 'cookie' and 'cookie_mask' may be supplied.
733 * Otherwise, 'cookie_mask' should be zero.
734 *
735 * This function can cause 'b''s data to be reallocated.
736 *
737 * Returns the number of bytes appended to 'b', excluding padding. The return
738 * value can be zero if it appended nothing at all to 'b' (which happens if
739 * 'match' is a catch-all rule that matches every packet). */
740 int
741 nx_put_match(struct ofpbuf *b, const struct match *match,
742 ovs_be64 cookie, ovs_be64 cookie_mask)
743 {
744 int match_len = nx_put_raw(b, false, match, cookie, cookie_mask);
745
746 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
747 return match_len;
748 }
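
/* Illustrative sketch (not part of the library): encoding a match for an
 * NXM-based message, with the surrounding message construction elided:
 *
 *     int match_len = nx_put_match(b, &match, cookie, cookie_mask);
 *
 * 'b' now ends with the nx_match padded to a multiple of 8 bytes, and
 * 'match_len' (which excludes the padding) is the value to store in the
 * message's match_len field. */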
749
750
751 /* Appends to 'b' a struct ofp11_match_header followed by the oxm format that
752 * expresses 'match', plus enough zero bytes to pad the data appended out to a
753 * multiple of 8.
754 *
755 * This function can cause 'b''s data to be reallocated.
756 *
757 * Returns the number of bytes appended to 'b', excluding the padding. Never
758 * returns zero. */
759 int
760 oxm_put_match(struct ofpbuf *b, const struct match *match)
761 {
762 int match_len;
763 struct ofp11_match_header *omh;
764 size_t start_len = ofpbuf_size(b);
765 ovs_be64 cookie = htonll(0), cookie_mask = htonll(0);
766
767 ofpbuf_put_uninit(b, sizeof *omh);
768 match_len = nx_put_raw(b, true, match, cookie, cookie_mask) + sizeof *omh;
769 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
770
771 omh = ofpbuf_at(b, start_len, sizeof *omh);
772 omh->type = htons(OFPMT_OXM);
773 omh->length = htons(match_len);
774
775 return match_len;
776 }
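
/* Sketch of typical use when building an OpenFlow 1.2+ message:
 *
 *     int match_len = oxm_put_match(b, &match);
 *
 * The returned length already includes sizeof(struct ofp11_match_header),
 * whose type and length fields have been filled in; only the padding up to
 * the next multiple of 8 bytes is excluded. */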
777 \f
778 /* nx_match_to_string() and helpers. */
779
780 static void format_nxm_field_name(struct ds *, uint32_t header);
781
782 char *
783 nx_match_to_string(const uint8_t *p, unsigned int match_len)
784 {
785 uint32_t header;
786 struct ds s;
787
788 if (!match_len) {
789 return xstrdup("<any>");
790 }
791
792 ds_init(&s);
793 while ((header = nx_entry_ok(p, match_len)) != 0) {
794 unsigned int length = NXM_LENGTH(header);
795 unsigned int value_len = nxm_field_bytes(header);
796 const uint8_t *value = p + 4;
797 const uint8_t *mask = value + value_len;
798 unsigned int i;
799
800 if (s.length) {
801 ds_put_cstr(&s, ", ");
802 }
803
804 format_nxm_field_name(&s, header);
805 ds_put_char(&s, '(');
806
807 for (i = 0; i < value_len; i++) {
808 ds_put_format(&s, "%02x", value[i]);
809 }
810 if (NXM_HASMASK(header)) {
811 ds_put_char(&s, '/');
812 for (i = 0; i < value_len; i++) {
813 ds_put_format(&s, "%02x", mask[i]);
814 }
815 }
816 ds_put_char(&s, ')');
817
818 p += 4 + length;
819 match_len -= 4 + length;
820 }
821
822 if (match_len) {
823 if (s.length) {
824 ds_put_cstr(&s, ", ");
825 }
826
827 ds_put_format(&s, "<%u invalid bytes>", match_len);
828 }
829
830 return ds_steal_cstr(&s);
831 }
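
/* For reference, the output is a comma-separated list of entries, each
 * formatted as "NAME(hex value)" or "NAME_W(hex value/hex mask)", for
 * example (a hypothetical match, not taken from a real message):
 *
 *     NXM_OF_ETH_TYPE(0800), NXM_OF_IP_SRC_W(c0a80000/ffff0000)
 */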
832
833 char *
834 oxm_match_to_string(const struct ofpbuf *p, unsigned int match_len)
835 {
836 const struct ofp11_match_header *omh = ofpbuf_data(p);
837 uint16_t match_len_;
838 struct ds s;
839
840 ds_init(&s);
841
842 if (match_len < sizeof *omh) {
843 ds_put_format(&s, "<match too short: %u>", match_len);
844 goto err;
845 }
846
847 if (omh->type != htons(OFPMT_OXM)) {
848 ds_put_format(&s, "<bad match type field: %u>", ntohs(omh->type));
849 goto err;
850 }
851
852 match_len_ = ntohs(omh->length);
853 if (match_len_ < sizeof *omh) {
854 ds_put_format(&s, "<match length field too short: %u>", match_len_);
855 goto err;
856 }
857
858 if (match_len_ != match_len) {
859 ds_put_format(&s, "<match length field incorrect: %u != %u>",
860 match_len_, match_len);
861 goto err;
862 }
863
864 return nx_match_to_string(ofpbuf_at(p, sizeof *omh, 0),
865 match_len - sizeof *omh);
866
867 err:
868 return ds_steal_cstr(&s);
869 }
870
871 static void
872 format_nxm_field_name(struct ds *s, uint32_t header)
873 {
874 const struct mf_field *mf = mf_from_nxm_header(header);
875 if (mf) {
876 ds_put_cstr(s, IS_OXM_HEADER(header) ? mf->oxm_name : mf->nxm_name);
877 if (NXM_HASMASK(header)) {
878 ds_put_cstr(s, "_W");
879 }
880 } else if (header == NXM_NX_COOKIE) {
881 ds_put_cstr(s, "NXM_NX_COOKIE");
882 } else if (header == NXM_NX_COOKIE_W) {
883 ds_put_cstr(s, "NXM_NX_COOKIE_W");
884 } else {
885 ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
886 }
887 }
888
889 static uint32_t
890 parse_nxm_field_name(const char *name, int name_len)
891 {
892 bool wild;
893 int i;
894
895 /* Check whether it's a field name. */
896 wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2);
897 if (wild) {
898 name_len -= 2;
899 }
900
901 for (i = 0; i < MFF_N_IDS; i++) {
902 const struct mf_field *mf = mf_from_id(i);
903 uint32_t header;
904
905 if (mf->nxm_name &&
906 !strncmp(mf->nxm_name, name, name_len) &&
907 mf->nxm_name[name_len] == '\0') {
908 header = mf->nxm_header;
909 } else if (mf->oxm_name &&
910 !strncmp(mf->oxm_name, name, name_len) &&
911 mf->oxm_name[name_len] == '\0') {
912 header = mf->oxm_header;
913 } else {
914 continue;
915 }
916
917 if (!wild) {
918 return header;
919 } else if (mf->maskable != MFM_NONE) {
920 return NXM_MAKE_WILD_HEADER(header);
921 }
922 }
923
924 if (!strncmp("NXM_NX_COOKIE", name, name_len) &&
925 (name_len == strlen("NXM_NX_COOKIE"))) {
926 if (!wild) {
927 return NXM_NX_COOKIE;
928 } else {
929 return NXM_NX_COOKIE_W;
930 }
931 }
932
933 /* Check whether it's a 32-bit field header value as hex.
934 * (This isn't ordinarily useful except for testing error behavior.) */
935 if (name_len == 8) {
936 uint32_t header = hexits_value(name, name_len, NULL);
937 if (header != UINT_MAX) {
938 return header;
939 }
940 }
941
942 return 0;
943 }
944 \f
945 /* nx_match_from_string(). */
946
947 static int
948 nx_match_from_string_raw(const char *s, struct ofpbuf *b)
949 {
950 const char *full_s = s;
951 const size_t start_len = ofpbuf_size(b);
952
953 if (!strcmp(s, "<any>")) {
954 /* Ensure that 'ofpbuf_data(b)' isn't actually null. */
955 ofpbuf_prealloc_tailroom(b, 1);
956 return 0;
957 }
958
959 for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
960 const char *name;
961 uint32_t header;
962 int name_len;
963 size_t n;
964
965 name = s;
966 name_len = strcspn(s, "(");
967 if (s[name_len] != '(') {
968 ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
969 }
970
971 header = parse_nxm_field_name(name, name_len);
972 if (!header) {
973 ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
974 }
975
976 s += name_len + 1;
977
978 nxm_put_header(b, header);
979 s = ofpbuf_put_hex(b, s, &n);
980 if (n != nxm_field_bytes(header)) {
981 ovs_fatal(0, "%.2s: hex digits expected", s);
982 }
983 if (NXM_HASMASK(header)) {
984 s += strspn(s, " ");
985 if (*s != '/') {
986 ovs_fatal(0, "%s: missing / in masked field %.*s",
987 full_s, name_len, name);
988 }
989 s = ofpbuf_put_hex(b, s + 1, &n);
990 if (n != nxm_field_bytes(header)) {
991 ovs_fatal(0, "%.2s: hex digits expected", s);
992 }
993 }
994
995 s += strspn(s, " ");
996 if (*s != ')') {
997 ovs_fatal(0, "%s: missing ) following field %.*s",
998 full_s, name_len, name);
999 }
1000 s++;
1001 }
1002
1003 return ofpbuf_size(b) - start_len;
1004 }
1005
1006 int
1007 nx_match_from_string(const char *s, struct ofpbuf *b)
1008 {
1009 int match_len = nx_match_from_string_raw(s, b);
1010 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1011 return match_len;
1012 }
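
/* Sketch of a round trip, since nx_match_from_string() accepts the same
 * syntax that nx_match_to_string() produces:
 *
 *     struct ofpbuf b;
 *     ofpbuf_init(&b, 64);
 *     int len = nx_match_from_string("NXM_OF_ETH_TYPE(0800)", &b);
 *     char *s = nx_match_to_string(ofpbuf_data(&b), len);
 *     free(s);
 *     ofpbuf_uninit(&b);
 *
 * Note that parse errors terminate the program via ovs_fatal(), so these
 * parsers are intended for test utilities rather than for untrusted input. */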
1013
1014 int
1015 oxm_match_from_string(const char *s, struct ofpbuf *b)
1016 {
1017 int match_len;
1018 struct ofp11_match_header *omh;
1019 size_t start_len = ofpbuf_size(b);
1020
1021 ofpbuf_put_uninit(b, sizeof *omh);
1022 match_len = nx_match_from_string_raw(s, b) + sizeof *omh;
1023 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1024
1025 omh = ofpbuf_at(b, start_len, sizeof *omh);
1026 omh->type = htons(OFPMT_OXM);
1027 omh->length = htons(match_len);
1028
1029 return match_len;
1030 }
1031 \f
1032 /* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into
1033 * '*move'.
1034 *
1035 * Returns NULL if successful, otherwise a malloc()'d string describing the
1036 * error. The caller is responsible for freeing the returned string. */
1037 char * WARN_UNUSED_RESULT
1038 nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
1039 {
1040 const char *full_s = s;
1041 char *error;
1042
1043 error = mf_parse_subfield__(&move->src, &s);
1044 if (error) {
1045 return error;
1046 }
1047 if (strncmp(s, "->", 2)) {
1048 return xasprintf("%s: missing `->' following source", full_s);
1049 }
1050 s += 2;
1051 error = mf_parse_subfield(&move->dst, s);
1052 if (error) {
1053 return error;
1054 }
1055
1056 if (move->src.n_bits != move->dst.n_bits) {
1057 return xasprintf("%s: source field is %d bits wide but destination is "
1058 "%d bits wide", full_s,
1059 move->src.n_bits, move->dst.n_bits);
1060 }
1061 return NULL;
1062 }
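
/* A sketch of the accepted syntax (the "move:" keyword itself is assumed to
 * have been stripped by the caller before this function sees the string):
 *
 *     NXM_NX_REG0[0..15]->NXM_OF_VLAN_TCI[0..15]
 *
 * that is, a source subfield, a literal "->", and a destination subfield of
 * the same width. */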
1063
1064 /* Parses 's' as a "load" action, in the form described in ovs-ofctl(8), into
1065 * '*load'.
1066 *
1067 * Returns NULL if successful, otherwise a malloc()'d string describing the
1068 * error. The caller is responsible for freeing the returned string. */
1069 char * WARN_UNUSED_RESULT
1070 nxm_parse_reg_load(struct ofpact_reg_load *load, const char *s)
1071 {
1072 const char *full_s = s;
1073 uint64_t value = strtoull(s, (char **) &s, 0);
1074 char *error;
1075
1076 if (strncmp(s, "->", 2)) {
1077 return xasprintf("%s: missing `->' following value", full_s);
1078 }
1079 s += 2;
1080 error = mf_parse_subfield(&load->dst, s);
1081 if (error) {
1082 return error;
1083 }
1084
1085 if (load->dst.n_bits < 64 && (value >> load->dst.n_bits) != 0) {
1086 return xasprintf("%s: value %"PRIu64" does not fit into %d bits",
1087 full_s, value, load->dst.n_bits);
1088 }
1089
1090 load->subvalue.be64[0] = htonll(0);
1091 load->subvalue.be64[1] = htonll(value);
1092 return NULL;
1093 }
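
/* A sketch of the accepted syntax, mirroring nxm_parse_reg_move() above:
 *
 *     0x1f->NXM_NX_REG0[0..4]
 *
 * that is, an integer value (parsed with strtoull(), so "0x" and octal
 * prefixes work), a literal "->", and a destination subfield wide enough to
 * hold the value. */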
1094 \f
1095 /* nxm_format_reg_move(), nxm_format_reg_load(). */
1096
1097 void
1098 nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
1099 {
1100 ds_put_format(s, "move:");
1101 mf_format_subfield(&move->src, s);
1102 ds_put_cstr(s, "->");
1103 mf_format_subfield(&move->dst, s);
1104 }
1105
1106 void
1107 nxm_format_reg_load(const struct ofpact_reg_load *load, struct ds *s)
1108 {
1109 ds_put_cstr(s, "load:");
1110 mf_format_subvalue(&load->subvalue, s);
1111 ds_put_cstr(s, "->");
1112 mf_format_subfield(&load->dst, s);
1113 }
1114 \f
1115 enum ofperr
1116 nxm_reg_move_from_openflow(const struct nx_action_reg_move *narm,
1117 struct ofpbuf *ofpacts)
1118 {
1119 struct ofpact_reg_move *move;
1120
1121 move = ofpact_put_REG_MOVE(ofpacts);
1122 move->src.field = mf_from_nxm_header(ntohl(narm->src));
1123 move->src.ofs = ntohs(narm->src_ofs);
1124 move->src.n_bits = ntohs(narm->n_bits);
1125 move->dst.field = mf_from_nxm_header(ntohl(narm->dst));
1126 move->dst.ofs = ntohs(narm->dst_ofs);
1127 move->dst.n_bits = ntohs(narm->n_bits);
1128
1129 return nxm_reg_move_check(move, NULL);
1130 }
1131
1132 enum ofperr
1133 nxm_reg_load_from_openflow(const struct nx_action_reg_load *narl,
1134 struct ofpbuf *ofpacts)
1135 {
1136 struct ofpact_reg_load *load;
1137
1138 load = ofpact_put_REG_LOAD(ofpacts);
1139 load->dst.field = mf_from_nxm_header(ntohl(narl->dst));
1140 load->dst.ofs = nxm_decode_ofs(narl->ofs_nbits);
1141 load->dst.n_bits = nxm_decode_n_bits(narl->ofs_nbits);
1142 load->subvalue.be64[1] = narl->value;
1143
1144 /* Reject 'narl' if a bit numbered 'n_bits' or higher is set to 1 in
1145 * narl->value. */
1146 if (load->dst.n_bits < 64 &&
1147 ntohll(narl->value) >> load->dst.n_bits) {
1148 return OFPERR_OFPBAC_BAD_ARGUMENT;
1149 }
1150
1151 return nxm_reg_load_check(load, NULL);
1152 }
1153 \f
1154 enum ofperr
1155 nxm_reg_move_check(const struct ofpact_reg_move *move, const struct flow *flow)
1156 {
1157 enum ofperr error;
1158
1159 error = mf_check_src(&move->src, flow);
1160 if (error) {
1161 return error;
1162 }
1163
1164 return mf_check_dst(&move->dst, NULL);
1165 }
1166
1167 enum ofperr
1168 nxm_reg_load_check(const struct ofpact_reg_load *load, const struct flow *flow)
1169 {
1170 return mf_check_dst(&load->dst, flow);
1171 }
1172 \f
1173 void
1174 nxm_reg_move_to_nxast(const struct ofpact_reg_move *move,
1175 struct ofpbuf *openflow)
1176 {
1177 struct nx_action_reg_move *narm;
1178
1179 narm = ofputil_put_NXAST_REG_MOVE(openflow);
1180 narm->n_bits = htons(move->dst.n_bits);
1181 narm->src_ofs = htons(move->src.ofs);
1182 narm->dst_ofs = htons(move->dst.ofs);
1183 narm->src = htonl(move->src.field->nxm_header);
1184 narm->dst = htonl(move->dst.field->nxm_header);
1185 }
1186
1187 void
1188 nxm_reg_load_to_nxast(const struct ofpact_reg_load *load,
1189 struct ofpbuf *openflow)
1190 {
1191 struct nx_action_reg_load *narl;
1192
1193 narl = ofputil_put_NXAST_REG_LOAD(openflow);
1194 narl->ofs_nbits = nxm_encode_ofs_nbits(load->dst.ofs, load->dst.n_bits);
1195 narl->dst = htonl(load->dst.field->nxm_header);
1196 narl->value = load->subvalue.be64[1];
1197 }
1198 \f
1199 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
1200
1201 void
1202 nxm_execute_reg_move(const struct ofpact_reg_move *move,
1203 struct flow *flow, struct flow_wildcards *wc)
1204 {
1205 union mf_value src_value;
1206 union mf_value dst_value;
1207
1208 mf_mask_field_and_prereqs(move->dst.field, &wc->masks);
1209 mf_mask_field_and_prereqs(move->src.field, &wc->masks);
1210
1211 mf_get_value(move->dst.field, flow, &dst_value);
1212 mf_get_value(move->src.field, flow, &src_value);
1213 bitwise_copy(&src_value, move->src.field->n_bytes, move->src.ofs,
1214 &dst_value, move->dst.field->n_bytes, move->dst.ofs,
1215 move->src.n_bits);
1216 mf_set_flow_value(move->dst.field, &dst_value, flow);
1217 }
1218
1219 void
1220 nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow,
1221 struct flow_wildcards *wc)
1222 {
1223 /* Since at the datapath interface we do not have set actions for
1224 * individual fields, but larger sets of fields for a given protocol
1225 * layer, the set action will in practice only ever apply to exactly
1226 * matched flows for the given protocol layer. For example, if the
1227 * reg_load changes the IP TTL, the corresponding datapath action will
1228 * rewrite also the IP addresses and TOS byte. Since these other field
1229 * values may not be explicitly set, they depend on the incoming flow field
1230 * values, and hence all of them are set in the wildcard masks when
1231 * the action is committed to the datapath. In the rare case where the
1232 * reg_load action does not actually change the value, and no other flow
1233 * field values are set (or loaded), the datapath action is skipped, and
1234 * no mask bits are set. Such a datapath flow should, however, be
1235 * dependent on the specific field value, so the corresponding wildcard
1236 * mask bits must be set, lest the datapath flow be applied to packets
1237 * containing some other value in the field and the field value remain
1238 * unchanged regardless of the incoming value.
1239 *
1240 * We set the masks here for the whole fields, and their prerequisites.
1241 * Even if only the lower byte of a TCP destination port is set,
1242 * we set the mask for the whole field, and also the ip_proto in the IP
1243 * header, so that the kernel flow would not be applied on, e.g., a UDP
1244 * packet, or any other IP protocol in addition to TCP packets.
1245 */
1246 mf_mask_field_and_prereqs(load->dst.field, &wc->masks);
1247 mf_write_subfield_flow(&load->dst, &load->subvalue, flow);
1248 }
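
/* A concrete illustration of the comment above (hypothetical values): if the
 * action loads 0x50 into the low byte of the TCP destination port, then
 * mf_mask_field_and_prereqs() unwildcards all 16 bits of tp_dst together with
 * its prerequisites (nw_proto and, in turn, dl_type), so the resulting
 * datapath flow matches only TCP packets carrying the original full
 * destination port, not every packet that happens to share that low byte. */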
1249
1250 void
1251 nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data,
1252 struct flow *flow, struct flow_wildcards *wc)
1253 {
1254 union mf_subvalue src_subvalue;
1255 union mf_subvalue mask_value;
1256 ovs_be64 src_data_be = htonll(src_data);
1257
1258 memset(&mask_value, 0xff, sizeof mask_value);
1259 mf_write_subfield_flow(dst, &mask_value, &wc->masks);
1260
1261 bitwise_copy(&src_data_be, sizeof src_data_be, 0,
1262 &src_subvalue, sizeof src_subvalue, 0,
1263 sizeof src_data_be * 8);
1264 mf_write_subfield_flow(dst, &src_subvalue, flow);
1265 }
1266 \f
1267 /* nxm_parse_stack_action(), which works for both "push" and "pop". */
1268
1269 /* Parses 's' as a "push" or "pop" action, in the form described in
1270 * ovs-ofctl(8), into '*stack_action'.
1271 *
1272 * Returns NULL if successful, otherwise a malloc()'d string describing the
1273 * error. The caller is responsible for freeing the returned string. */
1274 char * WARN_UNUSED_RESULT
1275 nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s)
1276 {
1277 char *error;
1278
1279 error = mf_parse_subfield__(&stack_action->subfield, &s);
1280 if (error) {
1281 return error;
1282 }
1283
1284 if (*s != '\0') {
1285 return xasprintf("%s: trailing garbage following push or pop", s);
1286 }
1287
1288 return NULL;
1289 }
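
/* A sketch of the accepted syntax (the "push:" or "pop:" keyword is assumed
 * to have been stripped by the caller): a single subfield such as
 *
 *     NXM_NX_REG0[0..31]
 *
 * naming the bits to push onto the stack, or to receive the popped value. */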
1290
1291 void
1292 nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s)
1293 {
1294 ds_put_cstr(s, "push:");
1295 mf_format_subfield(&push->subfield, s);
1296 }
1297
1298 void
1299 nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s)
1300 {
1301 ds_put_cstr(s, "pop:");
1302 mf_format_subfield(&pop->subfield, s);
1303 }
1304
1305 /* Common helpers shared by the push and pop actions. */
1306 static void
1307 stack_action_from_openflow__(const struct nx_action_stack *nasp,
1308 struct ofpact_stack *stack_action)
1309 {
1310 stack_action->subfield.field = mf_from_nxm_header(ntohl(nasp->field));
1311 stack_action->subfield.ofs = ntohs(nasp->offset);
1312 stack_action->subfield.n_bits = ntohs(nasp->n_bits);
1313 }
1314
1315 static void
1316 nxm_stack_to_nxast__(const struct ofpact_stack *stack_action,
1317 struct nx_action_stack *nasp)
1318 {
1319 nasp->offset = htons(stack_action->subfield.ofs);
1320 nasp->n_bits = htons(stack_action->subfield.n_bits);
1321 nasp->field = htonl(stack_action->subfield.field->nxm_header);
1322 }
1323
1324 enum ofperr
1325 nxm_stack_push_from_openflow(const struct nx_action_stack *nasp,
1326 struct ofpbuf *ofpacts)
1327 {
1328 struct ofpact_stack *push;
1329
1330 push = ofpact_put_STACK_PUSH(ofpacts);
1331 stack_action_from_openflow__(nasp, push);
1332
1333 return nxm_stack_push_check(push, NULL);
1334 }
1335
1336 enum ofperr
1337 nxm_stack_pop_from_openflow(const struct nx_action_stack *nasp,
1338 struct ofpbuf *ofpacts)
1339 {
1340 struct ofpact_stack *pop;
1341
1342 pop = ofpact_put_STACK_POP(ofpacts);
1343 stack_action_from_openflow__(nasp, pop);
1344
1345 return nxm_stack_pop_check(pop, NULL);
1346 }
1347
1348 enum ofperr
1349 nxm_stack_push_check(const struct ofpact_stack *push,
1350 const struct flow *flow)
1351 {
1352 return mf_check_src(&push->subfield, flow);
1353 }
1354
1355 enum ofperr
1356 nxm_stack_pop_check(const struct ofpact_stack *pop,
1357 const struct flow *flow)
1358 {
1359 return mf_check_dst(&pop->subfield, flow);
1360 }
1361
1362 void
1363 nxm_stack_push_to_nxast(const struct ofpact_stack *stack,
1364 struct ofpbuf *openflow)
1365 {
1366 nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_PUSH(openflow));
1367 }
1368
1369 void
1370 nxm_stack_pop_to_nxast(const struct ofpact_stack *stack,
1371 struct ofpbuf *openflow)
1372 {
1373 nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_POP(openflow));
1374 }
1375
1376 /* nxm_execute_stack_push(), nxm_execute_stack_pop(). */
1377 static void
1378 nx_stack_push(struct ofpbuf *stack, union mf_subvalue *v)
1379 {
1380 ofpbuf_put(stack, v, sizeof *v);
1381 }
1382
1383 static union mf_subvalue *
1384 nx_stack_pop(struct ofpbuf *stack)
1385 {
1386 union mf_subvalue *v = NULL;
1387
1388 if (ofpbuf_size(stack)) {
1389
1390 ofpbuf_set_size(stack, ofpbuf_size(stack) - sizeof *v);
1391 v = (union mf_subvalue *) ofpbuf_tail(stack);
1392 }
1393
1394 return v;
1395 }
1396
1397 void
1398 nxm_execute_stack_push(const struct ofpact_stack *push,
1399 const struct flow *flow, struct flow_wildcards *wc,
1400 struct ofpbuf *stack)
1401 {
1402 union mf_subvalue mask_value;
1403 union mf_subvalue dst_value;
1404
1405 memset(&mask_value, 0xff, sizeof mask_value);
1406 mf_write_subfield_flow(&push->subfield, &mask_value, &wc->masks);
1407
1408 mf_read_subfield(&push->subfield, flow, &dst_value);
1409 nx_stack_push(stack, &dst_value);
1410 }
1411
1412 void
1413 nxm_execute_stack_pop(const struct ofpact_stack *pop,
1414 struct flow *flow, struct flow_wildcards *wc,
1415 struct ofpbuf *stack)
1416 {
1417 union mf_subvalue *src_value;
1418
1419 src_value = nx_stack_pop(stack);
1420
1421 /* Only use the popped value if the stack was not empty; otherwise, log a warning. */
1422 if (src_value) {
1423 union mf_subvalue mask_value;
1424
1425 memset(&mask_value, 0xff, sizeof mask_value);
1426 mf_write_subfield_flow(&pop->subfield, &mask_value, &wc->masks);
1427 mf_write_subfield_flow(&pop->subfield, src_value, flow);
1428 } else {
1429 if (!VLOG_DROP_WARN(&rl)) {
1430 char *flow_str = flow_to_string(flow);
1431 VLOG_WARN_RL(&rl, "Failed to pop from an empty stack. On flow \n"
1432 " %s", flow_str);
1433 free(flow_str);
1434 }
1435 }
1436 }