2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
21 #include <netinet/icmp6.h>
23 #include "classifier.h"
24 #include "dynamic-string.h"
25 #include "meta-flow.h"
26 #include "ofp-actions.h"
27 #include "ofp-errors.h"
30 #include "openflow/nicira-ext.h"
32 #include "unaligned.h"
36 VLOG_DEFINE_THIS_MODULE(nx_match
);
38 /* Rate limit for nx_match parse errors. These always indicate a bug in the
39 * peer and so there's not much point in showing a lot of them. */
40 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
42 /* Returns the width of the data for a field with the given 'header', in
/* Returns the number of value bytes carried by an NXM/OXM entry whose header
 * is 'header'.  A masked entry splits its payload evenly between value and
 * mask, so the value is only half of NXM_LENGTH(). */
static int
nxm_field_bytes(uint32_t header)
{
    unsigned int payload = NXM_LENGTH(header);

    if (NXM_HASMASK(header)) {
        return payload / 2;
    }
    return payload;
}
51 /* Returns the width of the data for a field with the given 'header', in
/* Returns the width of the value for a field with the given 'header', in
 * bits. */
static int
nxm_field_bits(uint32_t header)
{
    int n_bytes = nxm_field_bytes(header);

    return n_bytes * 8;
}
59 /* nx_pull_match() and helpers. */
/* Validates the NXM/OXM entry that begins at 'p', given that 'match_len'
 * bytes remain in the enclosing nx_match.  Returns the entry's header in host
 * byte order when the entry is well formed, otherwise 0 (callers loop with
 * "(header = nx_entry_ok(p, match_len)) != 0").
 *
 * NOTE(review): several interior source lines are missing from this view;
 * comments below annotate only the visible fragments. */
62 nx_entry_ok(const void *p
, unsigned int match_len
)
/* Number of payload bytes that follow the entry's 4-byte header. */
64 unsigned int payload_len
;
/* Fewer than 4 bytes remain, so even the header is truncated: log it at
 * debug level, rate-limited, since this indicates a buggy peer. */
70 VLOG_DBG_RL(&rl
, "nx_match ends with partial (%u-byte) nxm_header",
/* 'p' may be unaligned; copy the header out before byte-swapping it from
 * network to host order. */
75 memcpy(&header_be
, p
, 4);
76 header
= ntohl(header_be
);
/* The payload length is encoded inside the header itself. */
78 payload_len
= NXM_LENGTH(header
);
/* Presumably guards an invalid (e.g. zero) payload length -- the condition
 * sits on source lines not visible here.  TODO(review): confirm. */
80 VLOG_DBG_RL(&rl
, "nxm_entry %08"PRIx32
" has invalid payload "
/* Entry claims more bytes than remain in the nx_match: reject it. */
84 if (match_len
< payload_len
+ 4) {
85 VLOG_DBG_RL(&rl
, "%"PRIu32
"-byte nxm_entry but only "
86 "%u bytes left in nx_match", payload_len
+ 4, match_len
);
/* Given the one-field nx_match entry at 'p' for maskable field 'mf', checks
 * for any 1-bit in the value where there is a 0-bit in the mask.  If it finds
 * one, logs a rate-limited warning (the message notes that future OVS
 * versions may report this as an OpenFlow error instead).
 *
 * NOTE(review): the tail of this function (freeing the formatted string and
 * closing braces) is on source lines not visible in this view. */
97 check_mask_consistency(const uint8_t *p
, const struct mf_field
*mf
)
/* Entry layout: 4-byte header at p[0], then 'width' value bytes, then
 * 'width' mask bytes. */
99 unsigned int width
= mf
->n_bytes
;
100 const uint8_t *value
= p
+ 4;
101 const uint8_t *mask
= p
+ 4 + width
;
/* Scan byte by byte for value bits uncovered by the mask. */
104 for (i
= 0; i
< width
; i
++) {
105 if (value
[i
] & ~mask
[i
]) {
/* Only format the (relatively expensive) string when the rate limiter
 * will actually let the warning through. */
106 if (!VLOG_DROP_WARN(&rl
)) {
107 char *s
= nx_match_to_string(p
, width
* 2 + 4);
108 VLOG_WARN_RL(&rl
, "NXM/OXM entry %s has 1-bits in value for "
109 "bits wildcarded by the mask. (Future versions "
110 "of OVS may report this as an OpenFlow error.)",
119 nx_pull_raw(const uint8_t *p
, unsigned int match_len
, bool strict
,
120 struct match
*match
, ovs_be64
*cookie
, ovs_be64
*cookie_mask
)
124 ovs_assert((cookie
!= NULL
) == (cookie_mask
!= NULL
));
126 match_init_catchall(match
);
128 *cookie
= *cookie_mask
= htonll(0);
135 (header
= nx_entry_ok(p
, match_len
)) != 0;
136 p
+= 4 + NXM_LENGTH(header
), match_len
-= 4 + NXM_LENGTH(header
)) {
137 const struct mf_field
*mf
;
140 mf
= mf_from_nxm_header(header
);
143 error
= OFPERR_OFPBMC_BAD_FIELD
;
147 } else if (!mf_are_prereqs_ok(mf
, &match
->flow
)) {
148 error
= OFPERR_OFPBMC_BAD_PREREQ
;
149 } else if (!mf_is_all_wild(mf
, &match
->wc
)) {
150 error
= OFPERR_OFPBMC_DUP_FIELD
;
152 unsigned int width
= mf
->n_bytes
;
153 union mf_value value
;
155 memcpy(&value
, p
+ 4, width
);
156 if (!mf_is_value_valid(mf
, &value
)) {
157 error
= OFPERR_OFPBMC_BAD_VALUE
;
158 } else if (!NXM_HASMASK(header
)) {
160 mf_set_value(mf
, &value
, match
);
164 memcpy(&mask
, p
+ 4 + width
, width
);
165 if (!mf_is_mask_valid(mf
, &mask
)) {
166 error
= OFPERR_OFPBMC_BAD_MASK
;
169 check_mask_consistency(p
, mf
);
170 mf_set(mf
, &value
, &mask
, match
);
175 /* Check if the match is for a cookie rather than a classifier rule. */
176 if ((header
== NXM_NX_COOKIE
|| header
== NXM_NX_COOKIE_W
) && cookie
) {
178 error
= OFPERR_OFPBMC_DUP_FIELD
;
180 unsigned int width
= sizeof *cookie
;
182 memcpy(cookie
, p
+ 4, width
);
183 if (NXM_HASMASK(header
)) {
184 memcpy(cookie_mask
, p
+ 4 + width
, width
);
186 *cookie_mask
= OVS_BE64_MAX
;
193 VLOG_DBG_RL(&rl
, "bad nxm_entry %#08"PRIx32
" (vendor=%"PRIu32
", "
194 "field=%"PRIu32
", hasmask=%"PRIu32
", len=%"PRIu32
"), "
196 NXM_VENDOR(header
), NXM_FIELD(header
),
197 NXM_HASMASK(header
), NXM_LENGTH(header
),
198 ofperr_to_string(error
));
203 return match_len
? OFPERR_OFPBMC_BAD_LEN
: 0;
207 nx_pull_match__(struct ofpbuf
*b
, unsigned int match_len
, bool strict
,
209 ovs_be64
*cookie
, ovs_be64
*cookie_mask
)
214 p
= ofpbuf_try_pull(b
, ROUND_UP(match_len
, 8));
216 VLOG_DBG_RL(&rl
, "nx_match length %u, rounded up to a "
217 "multiple of 8, is longer than space in message (max "
218 "length %"PRIu32
")", match_len
, ofpbuf_size(b
));
219 return OFPERR_OFPBMC_BAD_LEN
;
223 return nx_pull_raw(p
, match_len
, strict
, match
, cookie
, cookie_mask
);
226 /* Parses the nx_match formatted match description in 'b' with length
227 * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
228 * are valid pointers, then stores the cookie and mask in them if 'b' contains
229 * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
231 * Fails with an error upon encountering an unknown NXM header.
233 * Returns 0 if successful, otherwise an OpenFlow error code. */
235 nx_pull_match(struct ofpbuf
*b
, unsigned int match_len
, struct match
*match
,
236 ovs_be64
*cookie
, ovs_be64
*cookie_mask
)
238 return nx_pull_match__(b
, match_len
, true, match
, cookie
, cookie_mask
);
241 /* Behaves the same as nx_pull_match(), but skips over unknown NXM headers,
242 * instead of failing with an error. */
244 nx_pull_match_loose(struct ofpbuf
*b
, unsigned int match_len
,
246 ovs_be64
*cookie
, ovs_be64
*cookie_mask
)
248 return nx_pull_match__(b
, match_len
, false, match
, cookie
, cookie_mask
);
252 oxm_pull_match__(struct ofpbuf
*b
, bool strict
, struct match
*match
)
254 struct ofp11_match_header
*omh
= ofpbuf_data(b
);
258 if (ofpbuf_size(b
) < sizeof *omh
) {
259 return OFPERR_OFPBMC_BAD_LEN
;
262 match_len
= ntohs(omh
->length
);
263 if (match_len
< sizeof *omh
) {
264 return OFPERR_OFPBMC_BAD_LEN
;
267 if (omh
->type
!= htons(OFPMT_OXM
)) {
268 return OFPERR_OFPBMC_BAD_TYPE
;
271 p
= ofpbuf_try_pull(b
, ROUND_UP(match_len
, 8));
273 VLOG_DBG_RL(&rl
, "oxm length %u, rounded up to a "
274 "multiple of 8, is longer than space in message (max "
275 "length %"PRIu32
")", match_len
, ofpbuf_size(b
));
276 return OFPERR_OFPBMC_BAD_LEN
;
279 return nx_pull_raw(p
+ sizeof *omh
, match_len
- sizeof *omh
,
280 strict
, match
, NULL
, NULL
);
283 /* Parses the oxm formatted match description preceded by a struct
284 * ofp11_match_header in 'b'. Stores the result in 'match'.
286 * Fails with an error when encountering unknown OXM headers.
288 * Returns 0 if successful, otherwise an OpenFlow error code. */
290 oxm_pull_match(struct ofpbuf
*b
, struct match
*match
)
292 return oxm_pull_match__(b
, true, match
);
295 /* Behaves the same as oxm_pull_match() with one exception. Skips over unknown
296 * OXM headers instead of failing with an error when they are encountered. */
298 oxm_pull_match_loose(struct ofpbuf
*b
, struct match
*match
)
300 return oxm_pull_match__(b
, false, match
);
303 /* nx_put_match() and helpers.
305 * 'put' functions whose names end in 'w' add a wildcarded field.
306 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
307 * Other 'put' functions add exact-match fields.
311 nxm_put_header(struct ofpbuf
*b
, uint32_t header
)
313 ovs_be32 n_header
= htonl(header
);
314 ofpbuf_put(b
, &n_header
, sizeof n_header
);
/* Appends to 'b' an exact-match NXM/OXM entry: 'header' followed by its
 * one-byte 'value'. */
static void
nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}
/* Appends to 'b' an entry for 1-byte field 'header' with the given 'value'
 * and 'mask', picking the most compact encoding for the mask.
 * NOTE(review): the dispatch on 'mask' (all-zero / all-ones / partial) is on
 * source lines not visible in this view; only the per-case bodies appear. */
325 nxm_put_8m(struct ofpbuf
*b
, uint32_t header
, uint8_t value
, uint8_t mask
)
/* Exact (all-ones) mask: encode as a plain, unmasked entry. */
332 nxm_put_8(b
, header
, value
);
/* Partial mask: wildcarded (*_W) header, then value, then mask. */
336 nxm_put_header(b
, NXM_MAKE_WILD_HEADER(header
));
337 ofpbuf_put(b
, &value
, sizeof value
);
338 ofpbuf_put(b
, &mask
, sizeof mask
);
343 nxm_put_16(struct ofpbuf
*b
, uint32_t header
, ovs_be16 value
)
345 nxm_put_header(b
, header
);
346 ofpbuf_put(b
, &value
, sizeof value
);
350 nxm_put_16w(struct ofpbuf
*b
, uint32_t header
, ovs_be16 value
, ovs_be16 mask
)
352 nxm_put_header(b
, header
);
353 ofpbuf_put(b
, &value
, sizeof value
);
354 ofpbuf_put(b
, &mask
, sizeof mask
);
/* Appends to 'b' an entry for 2-byte field 'header' with the given 'value'
 * and 'mask', picking the most compact encoding for the mask.
 * NOTE(review): the dispatch on 'mask' (all-zero / all-ones / partial) is on
 * source lines not visible in this view; only the per-case bodies appear. */
358 nxm_put_16m(struct ofpbuf
*b
, uint32_t header
, ovs_be16 value
, ovs_be16 mask
)
/* Exact (all-ones) mask: encode as a plain, unmasked entry. */
365 nxm_put_16(b
, header
, value
);
/* Partial mask: emit the wildcarded variant with value and mask. */
369 nxm_put_16w(b
, NXM_MAKE_WILD_HEADER(header
), value
, mask
);
375 nxm_put_32(struct ofpbuf
*b
, uint32_t header
, ovs_be32 value
)
377 nxm_put_header(b
, header
);
378 ofpbuf_put(b
, &value
, sizeof value
);
382 nxm_put_32w(struct ofpbuf
*b
, uint32_t header
, ovs_be32 value
, ovs_be32 mask
)
384 nxm_put_header(b
, header
);
385 ofpbuf_put(b
, &value
, sizeof value
);
386 ofpbuf_put(b
, &mask
, sizeof mask
);
390 nxm_put_32m(struct ofpbuf
*b
, uint32_t header
, ovs_be32 value
, ovs_be32 mask
)
397 nxm_put_32(b
, header
, value
);
401 nxm_put_32w(b
, NXM_MAKE_WILD_HEADER(header
), value
, mask
);
407 nxm_put_64(struct ofpbuf
*b
, uint32_t header
, ovs_be64 value
)
409 nxm_put_header(b
, header
);
410 ofpbuf_put(b
, &value
, sizeof value
);
414 nxm_put_64w(struct ofpbuf
*b
, uint32_t header
, ovs_be64 value
, ovs_be64 mask
)
416 nxm_put_header(b
, header
);
417 ofpbuf_put(b
, &value
, sizeof value
);
418 ofpbuf_put(b
, &mask
, sizeof mask
);
422 nxm_put_64m(struct ofpbuf
*b
, uint32_t header
, ovs_be64 value
, ovs_be64 mask
)
429 nxm_put_64(b
, header
, value
);
433 nxm_put_64w(b
, NXM_MAKE_WILD_HEADER(header
), value
, mask
);
439 nxm_put_eth(struct ofpbuf
*b
, uint32_t header
,
440 const uint8_t value
[ETH_ADDR_LEN
])
442 nxm_put_header(b
, header
);
443 ofpbuf_put(b
, value
, ETH_ADDR_LEN
);
447 nxm_put_eth_masked(struct ofpbuf
*b
, uint32_t header
,
448 const uint8_t value
[ETH_ADDR_LEN
],
449 const uint8_t mask
[ETH_ADDR_LEN
])
451 if (!eth_addr_is_zero(mask
)) {
452 if (eth_mask_is_exact(mask
)) {
453 nxm_put_eth(b
, header
, value
);
455 nxm_put_header(b
, NXM_MAKE_WILD_HEADER(header
));
456 ofpbuf_put(b
, value
, ETH_ADDR_LEN
);
457 ofpbuf_put(b
, mask
, ETH_ADDR_LEN
);
/* Appends to 'b' an entry for 16-byte IPv6 field 'header' carrying 'value',
 * encoded according to 'mask': nothing when fully wildcarded, an exact-match
 * entry for an all-ones mask, otherwise a masked (*_W) entry. */
static void
nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
             const struct in6_addr *value, const struct in6_addr *mask)
{
    if (ipv6_mask_is_any(mask)) {
        /* Fully wildcarded: emit nothing. */
        return;
    }

    if (ipv6_mask_is_exact(mask)) {
        nxm_put_header(b, header);
        ofpbuf_put(b, value, sizeof *value);
    } else {
        nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
        ofpbuf_put(b, value, sizeof *value);
        ofpbuf_put(b, mask, sizeof *mask);
    }
}
479 nxm_put_frag(struct ofpbuf
*b
, const struct match
*match
)
481 uint8_t nw_frag
= match
->flow
.nw_frag
;
482 uint8_t nw_frag_mask
= match
->wc
.masks
.nw_frag
;
484 switch (nw_frag_mask
) {
488 case FLOW_NW_FRAG_MASK
:
489 nxm_put_8(b
, NXM_NX_IP_FRAG
, nw_frag
);
493 nxm_put_8m(b
, NXM_NX_IP_FRAG
, nw_frag
,
494 nw_frag_mask
& FLOW_NW_FRAG_MASK
);
500 nxm_put_ip(struct ofpbuf
*b
, const struct match
*match
,
501 uint8_t icmp_proto
, uint32_t icmp_type
, uint32_t icmp_code
,
504 const struct flow
*flow
= &match
->flow
;
506 nxm_put_frag(b
, match
);
508 if (match
->wc
.masks
.nw_tos
& IP_DSCP_MASK
) {
510 nxm_put_8(b
, OXM_OF_IP_DSCP
, flow
->nw_tos
>> 2);
512 nxm_put_8(b
, NXM_OF_IP_TOS
, flow
->nw_tos
& IP_DSCP_MASK
);
516 if (match
->wc
.masks
.nw_tos
& IP_ECN_MASK
) {
517 nxm_put_8(b
, oxm
? OXM_OF_IP_ECN
: NXM_NX_IP_ECN
,
518 flow
->nw_tos
& IP_ECN_MASK
);
521 if (!oxm
&& match
->wc
.masks
.nw_ttl
) {
522 nxm_put_8(b
, NXM_NX_IP_TTL
, flow
->nw_ttl
);
525 if (match
->wc
.masks
.nw_proto
) {
526 nxm_put_8(b
, oxm
? OXM_OF_IP_PROTO
: NXM_OF_IP_PROTO
, flow
->nw_proto
);
528 if (flow
->nw_proto
== IPPROTO_TCP
) {
529 nxm_put_16m(b
, oxm
? OXM_OF_TCP_SRC
: NXM_OF_TCP_SRC
,
530 flow
->tp_src
, match
->wc
.masks
.tp_src
);
531 nxm_put_16m(b
, oxm
? OXM_OF_TCP_DST
: NXM_OF_TCP_DST
,
532 flow
->tp_dst
, match
->wc
.masks
.tp_dst
);
533 nxm_put_16m(b
, NXM_NX_TCP_FLAGS
,
534 flow
->tcp_flags
, match
->wc
.masks
.tcp_flags
);
535 } else if (flow
->nw_proto
== IPPROTO_UDP
) {
536 nxm_put_16m(b
, oxm
? OXM_OF_UDP_SRC
: NXM_OF_UDP_SRC
,
537 flow
->tp_src
, match
->wc
.masks
.tp_src
);
538 nxm_put_16m(b
, oxm
? OXM_OF_UDP_DST
: NXM_OF_UDP_DST
,
539 flow
->tp_dst
, match
->wc
.masks
.tp_dst
);
540 } else if (flow
->nw_proto
== IPPROTO_SCTP
) {
541 nxm_put_16m(b
, OXM_OF_SCTP_SRC
, flow
->tp_src
,
542 match
->wc
.masks
.tp_src
);
543 nxm_put_16m(b
, OXM_OF_SCTP_DST
, flow
->tp_dst
,
544 match
->wc
.masks
.tp_dst
);
545 } else if (flow
->nw_proto
== icmp_proto
) {
546 if (match
->wc
.masks
.tp_src
) {
547 nxm_put_8(b
, icmp_type
, ntohs(flow
->tp_src
));
549 if (match
->wc
.masks
.tp_dst
) {
550 nxm_put_8(b
, icmp_code
, ntohs(flow
->tp_dst
));
556 /* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and
557 * Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
558 * Otherwise, 'cookie_mask' should be zero.
560 * This function can cause 'b''s data to be reallocated.
562 * Returns the number of bytes appended to 'b', excluding padding.
564 * If 'match' is a catch-all rule that matches every packet, then this function
565 * appends nothing to 'b' and returns 0. */
567 nx_put_raw(struct ofpbuf
*b
, bool oxm
, const struct match
*match
,
568 ovs_be64 cookie
, ovs_be64 cookie_mask
)
570 const struct flow
*flow
= &match
->flow
;
571 const size_t start_len
= ofpbuf_size(b
);
575 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 26);
578 if (match
->wc
.masks
.dp_hash
) {
580 nxm_put_32m(b
, NXM_NX_DP_HASH
, htonl(flow
->dp_hash
),
581 htonl(match
->wc
.masks
.dp_hash
));
585 if (match
->wc
.masks
.recirc_id
) {
587 nxm_put_32(b
, NXM_NX_RECIRC_ID
, htonl(flow
->recirc_id
));
591 if (match
->wc
.masks
.in_port
.ofp_port
) {
592 ofp_port_t in_port
= flow
->in_port
.ofp_port
;
594 nxm_put_32(b
, OXM_OF_IN_PORT
, ofputil_port_to_ofp11(in_port
));
596 nxm_put_16(b
, NXM_OF_IN_PORT
, htons(ofp_to_u16(in_port
)));
601 nxm_put_eth_masked(b
, oxm
? OXM_OF_ETH_SRC
: NXM_OF_ETH_SRC
,
602 flow
->dl_src
, match
->wc
.masks
.dl_src
);
603 nxm_put_eth_masked(b
, oxm
? OXM_OF_ETH_DST
: NXM_OF_ETH_DST
,
604 flow
->dl_dst
, match
->wc
.masks
.dl_dst
);
605 nxm_put_16m(b
, oxm
? OXM_OF_ETH_TYPE
: NXM_OF_ETH_TYPE
,
606 ofputil_dl_type_to_openflow(flow
->dl_type
),
607 match
->wc
.masks
.dl_type
);
611 ovs_be16 VID_CFI_MASK
= htons(VLAN_VID_MASK
| VLAN_CFI
);
612 ovs_be16 vid
= flow
->vlan_tci
& VID_CFI_MASK
;
613 ovs_be16 mask
= match
->wc
.masks
.vlan_tci
& VID_CFI_MASK
;
615 if (mask
== htons(VLAN_VID_MASK
| VLAN_CFI
)) {
616 nxm_put_16(b
, OXM_OF_VLAN_VID
, vid
);
618 nxm_put_16m(b
, OXM_OF_VLAN_VID
, vid
, mask
);
621 if (vid
&& vlan_tci_to_pcp(match
->wc
.masks
.vlan_tci
)) {
622 nxm_put_8(b
, OXM_OF_VLAN_PCP
, vlan_tci_to_pcp(flow
->vlan_tci
));
626 nxm_put_16m(b
, NXM_OF_VLAN_TCI
, flow
->vlan_tci
,
627 match
->wc
.masks
.vlan_tci
);
631 if (eth_type_mpls(flow
->dl_type
)) {
632 if (match
->wc
.masks
.mpls_lse
[0] & htonl(MPLS_TC_MASK
)) {
633 nxm_put_8(b
, OXM_OF_MPLS_TC
, mpls_lse_to_tc(flow
->mpls_lse
[0]));
636 if (match
->wc
.masks
.mpls_lse
[0] & htonl(MPLS_BOS_MASK
)) {
637 nxm_put_8(b
, OXM_OF_MPLS_BOS
, mpls_lse_to_bos(flow
->mpls_lse
[0]));
640 if (match
->wc
.masks
.mpls_lse
[0] & htonl(MPLS_LABEL_MASK
)) {
641 nxm_put_32(b
, OXM_OF_MPLS_LABEL
,
642 htonl(mpls_lse_to_label(flow
->mpls_lse
[0])));
647 if (flow
->dl_type
== htons(ETH_TYPE_IP
)) {
649 nxm_put_32m(b
, oxm
? OXM_OF_IPV4_SRC
: NXM_OF_IP_SRC
,
650 flow
->nw_src
, match
->wc
.masks
.nw_src
);
651 nxm_put_32m(b
, oxm
? OXM_OF_IPV4_DST
: NXM_OF_IP_DST
,
652 flow
->nw_dst
, match
->wc
.masks
.nw_dst
);
653 nxm_put_ip(b
, match
, IPPROTO_ICMP
,
654 oxm
? OXM_OF_ICMPV4_TYPE
: NXM_OF_ICMP_TYPE
,
655 oxm
? OXM_OF_ICMPV4_CODE
: NXM_OF_ICMP_CODE
, oxm
);
656 } else if (flow
->dl_type
== htons(ETH_TYPE_IPV6
)) {
658 nxm_put_ipv6(b
, oxm
? OXM_OF_IPV6_SRC
: NXM_NX_IPV6_SRC
,
659 &flow
->ipv6_src
, &match
->wc
.masks
.ipv6_src
);
660 nxm_put_ipv6(b
, oxm
? OXM_OF_IPV6_DST
: NXM_NX_IPV6_DST
,
661 &flow
->ipv6_dst
, &match
->wc
.masks
.ipv6_dst
);
662 nxm_put_ip(b
, match
, IPPROTO_ICMPV6
,
663 oxm
? OXM_OF_ICMPV6_TYPE
: NXM_NX_ICMPV6_TYPE
,
664 oxm
? OXM_OF_ICMPV6_CODE
: NXM_NX_ICMPV6_CODE
, oxm
);
666 nxm_put_32m(b
, oxm
? OXM_OF_IPV6_FLABEL
: NXM_NX_IPV6_LABEL
,
667 flow
->ipv6_label
, match
->wc
.masks
.ipv6_label
);
669 if (flow
->nw_proto
== IPPROTO_ICMPV6
670 && (flow
->tp_src
== htons(ND_NEIGHBOR_SOLICIT
) ||
671 flow
->tp_src
== htons(ND_NEIGHBOR_ADVERT
))) {
672 nxm_put_ipv6(b
, oxm
? OXM_OF_IPV6_ND_TARGET
: NXM_NX_ND_TARGET
,
673 &flow
->nd_target
, &match
->wc
.masks
.nd_target
);
674 if (flow
->tp_src
== htons(ND_NEIGHBOR_SOLICIT
)) {
675 nxm_put_eth_masked(b
, oxm
? OXM_OF_IPV6_ND_SLL
: NXM_NX_ND_SLL
,
676 flow
->arp_sha
, match
->wc
.masks
.arp_sha
);
678 if (flow
->tp_src
== htons(ND_NEIGHBOR_ADVERT
)) {
679 nxm_put_eth_masked(b
, oxm
? OXM_OF_IPV6_ND_TLL
: NXM_NX_ND_TLL
,
680 flow
->arp_tha
, match
->wc
.masks
.arp_tha
);
683 } else if (flow
->dl_type
== htons(ETH_TYPE_ARP
) ||
684 flow
->dl_type
== htons(ETH_TYPE_RARP
)) {
686 if (match
->wc
.masks
.nw_proto
) {
687 nxm_put_16(b
, oxm
? OXM_OF_ARP_OP
: NXM_OF_ARP_OP
,
688 htons(flow
->nw_proto
));
690 nxm_put_32m(b
, oxm
? OXM_OF_ARP_SPA
: NXM_OF_ARP_SPA
,
691 flow
->nw_src
, match
->wc
.masks
.nw_src
);
692 nxm_put_32m(b
, oxm
? OXM_OF_ARP_TPA
: NXM_OF_ARP_TPA
,
693 flow
->nw_dst
, match
->wc
.masks
.nw_dst
);
694 nxm_put_eth_masked(b
, oxm
? OXM_OF_ARP_SHA
: NXM_NX_ARP_SHA
,
695 flow
->arp_sha
, match
->wc
.masks
.arp_sha
);
696 nxm_put_eth_masked(b
, oxm
? OXM_OF_ARP_THA
: NXM_NX_ARP_THA
,
697 flow
->arp_tha
, match
->wc
.masks
.arp_tha
);
701 nxm_put_64m(b
, oxm
? OXM_OF_TUNNEL_ID
: NXM_NX_TUN_ID
,
702 flow
->tunnel
.tun_id
, match
->wc
.masks
.tunnel
.tun_id
);
704 /* Other tunnel metadata. */
705 nxm_put_32m(b
, NXM_NX_TUN_IPV4_SRC
,
706 flow
->tunnel
.ip_src
, match
->wc
.masks
.tunnel
.ip_src
);
707 nxm_put_32m(b
, NXM_NX_TUN_IPV4_DST
,
708 flow
->tunnel
.ip_dst
, match
->wc
.masks
.tunnel
.ip_dst
);
711 for (i
= 0; i
< FLOW_N_REGS
; i
++) {
712 nxm_put_32m(b
, NXM_NX_REG(i
),
713 htonl(flow
->regs
[i
]), htonl(match
->wc
.masks
.regs
[i
]));
717 nxm_put_32m(b
, NXM_NX_PKT_MARK
, htonl(flow
->pkt_mark
),
718 htonl(match
->wc
.masks
.pkt_mark
));
720 /* OpenFlow 1.1+ Metadata. */
721 nxm_put_64m(b
, OXM_OF_METADATA
, flow
->metadata
, match
->wc
.masks
.metadata
);
724 nxm_put_64m(b
, NXM_NX_COOKIE
, cookie
, cookie_mask
);
726 match_len
= ofpbuf_size(b
) - start_len
;
730 /* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
731 * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
732 * Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
733 * Otherwise, 'cookie_mask' should be zero.
735 * This function can cause 'b''s data to be reallocated.
737 * Returns the number of bytes appended to 'b', excluding padding. The return
738 * value can be zero if it appended nothing at all to 'b' (which happens if
739 * 'cr' is a catch-all rule that matches every packet). */
741 nx_put_match(struct ofpbuf
*b
, const struct match
*match
,
742 ovs_be64 cookie
, ovs_be64 cookie_mask
)
744 int match_len
= nx_put_raw(b
, false, match
, cookie
, cookie_mask
);
746 ofpbuf_put_zeros(b
, PAD_SIZE(match_len
, 8));
751 /* Appends to 'b' an struct ofp11_match_header followed by the oxm format that
752 * expresses 'cr', plus enough zero bytes to pad the data appended out to a
755 * This function can cause 'b''s data to be reallocated.
757 * Returns the number of bytes appended to 'b', excluding the padding. Never
760 oxm_put_match(struct ofpbuf
*b
, const struct match
*match
)
763 struct ofp11_match_header
*omh
;
764 size_t start_len
= ofpbuf_size(b
);
765 ovs_be64 cookie
= htonll(0), cookie_mask
= htonll(0);
767 ofpbuf_put_uninit(b
, sizeof *omh
);
768 match_len
= nx_put_raw(b
, true, match
, cookie
, cookie_mask
) + sizeof *omh
;
769 ofpbuf_put_zeros(b
, PAD_SIZE(match_len
, 8));
771 omh
= ofpbuf_at(b
, start_len
, sizeof *omh
);
772 omh
->type
= htons(OFPMT_OXM
);
773 omh
->length
= htons(match_len
);
778 /* nx_match_to_string() and helpers. */
780 static void format_nxm_field_name(struct ds
*, uint32_t header
);
783 nx_match_to_string(const uint8_t *p
, unsigned int match_len
)
789 return xstrdup("<any>");
793 while ((header
= nx_entry_ok(p
, match_len
)) != 0) {
794 unsigned int length
= NXM_LENGTH(header
);
795 unsigned int value_len
= nxm_field_bytes(header
);
796 const uint8_t *value
= p
+ 4;
797 const uint8_t *mask
= value
+ value_len
;
801 ds_put_cstr(&s
, ", ");
804 format_nxm_field_name(&s
, header
);
805 ds_put_char(&s
, '(');
807 for (i
= 0; i
< value_len
; i
++) {
808 ds_put_format(&s
, "%02x", value
[i
]);
810 if (NXM_HASMASK(header
)) {
811 ds_put_char(&s
, '/');
812 for (i
= 0; i
< value_len
; i
++) {
813 ds_put_format(&s
, "%02x", mask
[i
]);
816 ds_put_char(&s
, ')');
819 match_len
-= 4 + length
;
824 ds_put_cstr(&s
, ", ");
827 ds_put_format(&s
, "<%u invalid bytes>", match_len
);
830 return ds_steal_cstr(&s
);
834 oxm_match_to_string(const struct ofpbuf
*p
, unsigned int match_len
)
836 const struct ofp11_match_header
*omh
= ofpbuf_data(p
);
842 if (match_len
< sizeof *omh
) {
843 ds_put_format(&s
, "<match too short: %u>", match_len
);
847 if (omh
->type
!= htons(OFPMT_OXM
)) {
848 ds_put_format(&s
, "<bad match type field: %u>", ntohs(omh
->type
));
852 match_len_
= ntohs(omh
->length
);
853 if (match_len_
< sizeof *omh
) {
854 ds_put_format(&s
, "<match length field too short: %u>", match_len_
);
858 if (match_len_
!= match_len
) {
859 ds_put_format(&s
, "<match length field incorrect: %u != %u>",
860 match_len_
, match_len
);
864 return nx_match_to_string(ofpbuf_at(p
, sizeof *omh
, 0),
865 match_len
- sizeof *omh
);
868 return ds_steal_cstr(&s
);
872 format_nxm_field_name(struct ds
*s
, uint32_t header
)
874 const struct mf_field
*mf
= mf_from_nxm_header(header
);
876 ds_put_cstr(s
, IS_OXM_HEADER(header
) ? mf
->oxm_name
: mf
->nxm_name
);
877 if (NXM_HASMASK(header
)) {
878 ds_put_cstr(s
, "_W");
880 } else if (header
== NXM_NX_COOKIE
) {
881 ds_put_cstr(s
, "NXM_NX_COOKIE");
882 } else if (header
== NXM_NX_COOKIE_W
) {
883 ds_put_cstr(s
, "NXM_NX_COOKIE_W");
885 ds_put_format(s
, "%d:%d", NXM_VENDOR(header
), NXM_FIELD(header
));
890 parse_nxm_field_name(const char *name
, int name_len
)
895 /* Check whether it's a field name. */
896 wild
= name_len
> 2 && !memcmp(&name
[name_len
- 2], "_W", 2);
901 for (i
= 0; i
< MFF_N_IDS
; i
++) {
902 const struct mf_field
*mf
= mf_from_id(i
);
906 !strncmp(mf
->nxm_name
, name
, name_len
) &&
907 mf
->nxm_name
[name_len
] == '\0') {
908 header
= mf
->nxm_header
;
909 } else if (mf
->oxm_name
&&
910 !strncmp(mf
->oxm_name
, name
, name_len
) &&
911 mf
->oxm_name
[name_len
] == '\0') {
912 header
= mf
->oxm_header
;
919 } else if (mf
->maskable
!= MFM_NONE
) {
920 return NXM_MAKE_WILD_HEADER(header
);
924 if (!strncmp("NXM_NX_COOKIE", name
, name_len
) &&
925 (name_len
== strlen("NXM_NX_COOKIE"))) {
927 return NXM_NX_COOKIE
;
929 return NXM_NX_COOKIE_W
;
933 /* Check whether it's a 32-bit field header value as hex.
934 * (This isn't ordinarily useful except for testing error behavior.) */
936 uint32_t header
= hexits_value(name
, name_len
, NULL
);
937 if (header
!= UINT_MAX
) {
945 /* nx_match_from_string(). */
948 nx_match_from_string_raw(const char *s
, struct ofpbuf
*b
)
950 const char *full_s
= s
;
951 const size_t start_len
= ofpbuf_size(b
);
953 if (!strcmp(s
, "<any>")) {
954 /* Ensure that 'ofpbuf_data(b)' isn't actually null. */
955 ofpbuf_prealloc_tailroom(b
, 1);
959 for (s
+= strspn(s
, ", "); *s
; s
+= strspn(s
, ", ")) {
966 name_len
= strcspn(s
, "(");
967 if (s
[name_len
] != '(') {
968 ovs_fatal(0, "%s: missing ( at end of nx_match", full_s
);
971 header
= parse_nxm_field_name(name
, name_len
);
973 ovs_fatal(0, "%s: unknown field `%.*s'", full_s
, name_len
, s
);
978 nxm_put_header(b
, header
);
979 s
= ofpbuf_put_hex(b
, s
, &n
);
980 if (n
!= nxm_field_bytes(header
)) {
981 ovs_fatal(0, "%.2s: hex digits expected", s
);
983 if (NXM_HASMASK(header
)) {
986 ovs_fatal(0, "%s: missing / in masked field %.*s",
987 full_s
, name_len
, name
);
989 s
= ofpbuf_put_hex(b
, s
+ 1, &n
);
990 if (n
!= nxm_field_bytes(header
)) {
991 ovs_fatal(0, "%.2s: hex digits expected", s
);
997 ovs_fatal(0, "%s: missing ) following field %.*s",
998 full_s
, name_len
, name
);
1003 return ofpbuf_size(b
) - start_len
;
int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    /* Parse the nx_match, then pad to an 8-byte boundary; the returned
     * length excludes the padding. */
    int match_len = nx_match_from_string_raw(s, b);

    ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
    return match_len;
}
1015 oxm_match_from_string(const char *s
, struct ofpbuf
*b
)
1018 struct ofp11_match_header
*omh
;
1019 size_t start_len
= ofpbuf_size(b
);
1021 ofpbuf_put_uninit(b
, sizeof *omh
);
1022 match_len
= nx_match_from_string_raw(s
, b
) + sizeof *omh
;
1023 ofpbuf_put_zeros(b
, PAD_SIZE(match_len
, 8));
1025 omh
= ofpbuf_at(b
, start_len
, sizeof *omh
);
1026 omh
->type
= htons(OFPMT_OXM
);
1027 omh
->length
= htons(match_len
);
1032 /* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into
1035 * Returns NULL if successful, otherwise a malloc()'d string describing the
1036 * error. The caller is responsible for freeing the returned string. */
1037 char * WARN_UNUSED_RESULT
1038 nxm_parse_reg_move(struct ofpact_reg_move
*move
, const char *s
)
1040 const char *full_s
= s
;
1043 error
= mf_parse_subfield__(&move
->src
, &s
);
1047 if (strncmp(s
, "->", 2)) {
1048 return xasprintf("%s: missing `->' following source", full_s
);
1051 error
= mf_parse_subfield(&move
->dst
, s
);
1056 if (move
->src
.n_bits
!= move
->dst
.n_bits
) {
1057 return xasprintf("%s: source field is %d bits wide but destination is "
1058 "%d bits wide", full_s
,
1059 move
->src
.n_bits
, move
->dst
.n_bits
);
1064 /* Parses 's' as a "load" action, in the form described in ovs-ofctl(8), into
1067 * Returns NULL if successful, otherwise a malloc()'d string describing the
1068 * error. The caller is responsible for freeing the returned string. */
1069 char * WARN_UNUSED_RESULT
1070 nxm_parse_reg_load(struct ofpact_reg_load
*load
, const char *s
)
1072 const char *full_s
= s
;
1073 uint64_t value
= strtoull(s
, (char **) &s
, 0);
1076 if (strncmp(s
, "->", 2)) {
1077 return xasprintf("%s: missing `->' following value", full_s
);
1080 error
= mf_parse_subfield(&load
->dst
, s
);
1085 if (load
->dst
.n_bits
< 64 && (value
>> load
->dst
.n_bits
) != 0) {
1086 return xasprintf("%s: value %"PRIu64
" does not fit into %d bits",
1087 full_s
, value
, load
->dst
.n_bits
);
1090 load
->subvalue
.be64
[0] = htonll(0);
1091 load
->subvalue
.be64
[1] = htonll(value
);
1095 /* nxm_format_reg_move(), nxm_format_reg_load(). */
1098 nxm_format_reg_move(const struct ofpact_reg_move
*move
, struct ds
*s
)
1100 ds_put_format(s
, "move:");
1101 mf_format_subfield(&move
->src
, s
);
1102 ds_put_cstr(s
, "->");
1103 mf_format_subfield(&move
->dst
, s
);
1107 nxm_format_reg_load(const struct ofpact_reg_load
*load
, struct ds
*s
)
1109 ds_put_cstr(s
, "load:");
1110 mf_format_subvalue(&load
->subvalue
, s
);
1111 ds_put_cstr(s
, "->");
1112 mf_format_subfield(&load
->dst
, s
);
1116 nxm_reg_move_from_openflow(const struct nx_action_reg_move
*narm
,
1117 struct ofpbuf
*ofpacts
)
1119 struct ofpact_reg_move
*move
;
1121 move
= ofpact_put_REG_MOVE(ofpacts
);
1122 move
->src
.field
= mf_from_nxm_header(ntohl(narm
->src
));
1123 move
->src
.ofs
= ntohs(narm
->src_ofs
);
1124 move
->src
.n_bits
= ntohs(narm
->n_bits
);
1125 move
->dst
.field
= mf_from_nxm_header(ntohl(narm
->dst
));
1126 move
->dst
.ofs
= ntohs(narm
->dst_ofs
);
1127 move
->dst
.n_bits
= ntohs(narm
->n_bits
);
1129 return nxm_reg_move_check(move
, NULL
);
1133 nxm_reg_load_from_openflow(const struct nx_action_reg_load
*narl
,
1134 struct ofpbuf
*ofpacts
)
1136 struct ofpact_reg_load
*load
;
1138 load
= ofpact_put_REG_LOAD(ofpacts
);
1139 load
->dst
.field
= mf_from_nxm_header(ntohl(narl
->dst
));
1140 load
->dst
.ofs
= nxm_decode_ofs(narl
->ofs_nbits
);
1141 load
->dst
.n_bits
= nxm_decode_n_bits(narl
->ofs_nbits
);
1142 load
->subvalue
.be64
[1] = narl
->value
;
1144 /* Reject 'narl' if a bit numbered 'n_bits' or higher is set to 1 in
1146 if (load
->dst
.n_bits
< 64 &&
1147 ntohll(narl
->value
) >> load
->dst
.n_bits
) {
1148 return OFPERR_OFPBAC_BAD_ARGUMENT
;
1151 return nxm_reg_load_check(load
, NULL
);
1155 nxm_reg_move_check(const struct ofpact_reg_move
*move
, const struct flow
*flow
)
1159 error
= mf_check_src(&move
->src
, flow
);
1164 return mf_check_dst(&move
->dst
, NULL
);
1168 nxm_reg_load_check(const struct ofpact_reg_load
*load
, const struct flow
*flow
)
1170 return mf_check_dst(&load
->dst
, flow
);
1174 nxm_reg_move_to_nxast(const struct ofpact_reg_move
*move
,
1175 struct ofpbuf
*openflow
)
1177 struct nx_action_reg_move
*narm
;
1179 narm
= ofputil_put_NXAST_REG_MOVE(openflow
);
1180 narm
->n_bits
= htons(move
->dst
.n_bits
);
1181 narm
->src_ofs
= htons(move
->src
.ofs
);
1182 narm
->dst_ofs
= htons(move
->dst
.ofs
);
1183 narm
->src
= htonl(move
->src
.field
->nxm_header
);
1184 narm
->dst
= htonl(move
->dst
.field
->nxm_header
);
1188 nxm_reg_load_to_nxast(const struct ofpact_reg_load
*load
,
1189 struct ofpbuf
*openflow
)
1191 struct nx_action_reg_load
*narl
;
1193 narl
= ofputil_put_NXAST_REG_LOAD(openflow
);
1194 narl
->ofs_nbits
= nxm_encode_ofs_nbits(load
->dst
.ofs
, load
->dst
.n_bits
);
1195 narl
->dst
= htonl(load
->dst
.field
->nxm_header
);
1196 narl
->value
= load
->subvalue
.be64
[1];
1199 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
1202 nxm_execute_reg_move(const struct ofpact_reg_move
*move
,
1203 struct flow
*flow
, struct flow_wildcards
*wc
)
1205 union mf_value src_value
;
1206 union mf_value dst_value
;
1208 mf_mask_field_and_prereqs(move
->dst
.field
, &wc
->masks
);
1209 mf_mask_field_and_prereqs(move
->src
.field
, &wc
->masks
);
1211 mf_get_value(move
->dst
.field
, flow
, &dst_value
);
1212 mf_get_value(move
->src
.field
, flow
, &src_value
);
1213 bitwise_copy(&src_value
, move
->src
.field
->n_bytes
, move
->src
.ofs
,
1214 &dst_value
, move
->dst
.field
->n_bytes
, move
->dst
.ofs
,
1216 mf_set_flow_value(move
->dst
.field
, &dst_value
, flow
);
/* Applies the NXAST_REG_LOAD action 'load' to 'flow', writing the action's
 * immediate subvalue into the destination subfield, and updates 'wc' so the
 * resulting datapath flow depends on the loaded field. */
void
nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow,
                     struct flow_wildcards *wc)
{
    /* Since at the datapath interface we do not have set actions for
     * individual fields, but larger sets of fields for a given protocol
     * layer, the set action will in practice only ever apply to exactly
     * matched flows for the given protocol layer.  For example, if the
     * reg_load changes the IP TTL, the corresponding datapath action will
     * rewrite also the IP addresses and TOS byte.  Since these other field
     * values may not be explicitly set, they depend on the incoming flow
     * field values, and hence all of them are set in the wildcard masks
     * when the action is committed to the datapath.  For the rare case
     * where the reg_load action does not actually change the value, and no
     * other flow field values are set (or loaded), the datapath action is
     * skipped, and no mask bits are set.  Such a datapath flow should,
     * however, be dependent on the specific field value, so the
     * corresponding wildcard mask bits must be set, lest the datapath flow
     * be applied to packets containing some other value in the field and
     * the field value remain unchanged regardless of the incoming value.
     *
     * We set the masks here for the whole fields, and their prerequisites.
     * Even if only the lower byte of a TCP destination port is set,
     * we set the mask for the whole field, and also the ip_proto in the IP
     * header, so that the kernel flow would not be applied on, e.g., a UDP
     * packet, or any other IP protocol in addition to TCP packets.
     */
    mf_mask_field_and_prereqs(load->dst.field, &wc->masks);

    mf_write_subfield_flow(&load->dst, &load->subvalue, flow);
}
1251 nxm_reg_load(const struct mf_subfield
*dst
, uint64_t src_data
,
1252 struct flow
*flow
, struct flow_wildcards
*wc
)
1254 union mf_subvalue src_subvalue
;
1255 union mf_subvalue mask_value
;
1256 ovs_be64 src_data_be
= htonll(src_data
);
1258 memset(&mask_value
, 0xff, sizeof mask_value
);
1259 mf_write_subfield_flow(dst
, &mask_value
, &wc
->masks
);
1261 bitwise_copy(&src_data_be
, sizeof src_data_be
, 0,
1262 &src_subvalue
, sizeof src_subvalue
, 0,
1263 sizeof src_data_be
* 8);
1264 mf_write_subfield_flow(dst
, &src_subvalue
, flow
);
1267 /* nxm_parse_stack_action, works for both push() and pop(). */
1269 /* Parses 's' as a "push" or "pop" action, in the form described in
1270 * ovs-ofctl(8), into '*stack_action'.
1272 * Returns NULL if successful, otherwise a malloc()'d string describing the
1273 * error. The caller is responsible for freeing the returned string. */
1274 char * WARN_UNUSED_RESULT
1275 nxm_parse_stack_action(struct ofpact_stack
*stack_action
, const char *s
)
1279 error
= mf_parse_subfield__(&stack_action
->subfield
, &s
);
1285 return xasprintf("%s: trailing garbage following push or pop", s
);
/* Appends a human-readable "push:<subfield>" representation of 'push' to
 * 's'. */
void
nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s)
{
    ds_put_cstr(s, "push:");
    mf_format_subfield(&push->subfield, s);
}
/* Appends a human-readable "pop:<subfield>" representation of 'pop' to
 * 's'. */
void
nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s)
{
    ds_put_cstr(s, "pop:");
    mf_format_subfield(&pop->subfield, s);
}
/* Decoding and encoding helpers shared by the push and pop stack actions. */
1307 stack_action_from_openflow__(const struct nx_action_stack
*nasp
,
1308 struct ofpact_stack
*stack_action
)
1310 stack_action
->subfield
.field
= mf_from_nxm_header(ntohl(nasp
->field
));
1311 stack_action
->subfield
.ofs
= ntohs(nasp
->offset
);
1312 stack_action
->subfield
.n_bits
= ntohs(nasp
->n_bits
);
1316 nxm_stack_to_nxast__(const struct ofpact_stack
*stack_action
,
1317 struct nx_action_stack
*nasp
)
1319 nasp
->offset
= htons(stack_action
->subfield
.ofs
);
1320 nasp
->n_bits
= htons(stack_action
->subfield
.n_bits
);
1321 nasp
->field
= htonl(stack_action
->subfield
.field
->nxm_header
);
1325 nxm_stack_push_from_openflow(const struct nx_action_stack
*nasp
,
1326 struct ofpbuf
*ofpacts
)
1328 struct ofpact_stack
*push
;
1330 push
= ofpact_put_STACK_PUSH(ofpacts
);
1331 stack_action_from_openflow__(nasp
, push
);
1333 return nxm_stack_push_check(push
, NULL
);
1337 nxm_stack_pop_from_openflow(const struct nx_action_stack
*nasp
,
1338 struct ofpbuf
*ofpacts
)
1340 struct ofpact_stack
*pop
;
1342 pop
= ofpact_put_STACK_POP(ofpacts
);
1343 stack_action_from_openflow__(nasp
, pop
);
1345 return nxm_stack_pop_check(pop
, NULL
);
/* Checks that 'push' names a valid source subfield to read, for 'flow' (or,
 * if 'flow' is NULL, for any flow).  Returns 0 if OK, otherwise an OpenFlow
 * error. */
enum ofperr
nxm_stack_push_check(const struct ofpact_stack *push,
                     const struct flow *flow)
{
    return mf_check_src(&push->subfield, flow);
}
/* Checks that 'pop' names a valid destination subfield to write, for 'flow'
 * (or, if 'flow' is NULL, for any flow).  Returns 0 if OK, otherwise an
 * OpenFlow error. */
enum ofperr
nxm_stack_pop_check(const struct ofpact_stack *pop,
                    const struct flow *flow)
{
    return mf_check_dst(&pop->subfield, flow);
}
/* Encodes 'stack' as an NXAST_STACK_PUSH action appended to 'openflow'. */
void
nxm_stack_push_to_nxast(const struct ofpact_stack *stack,
                        struct ofpbuf *openflow)
{
    nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_PUSH(openflow));
}
/* Encodes 'stack' as an NXAST_STACK_POP action appended to 'openflow'. */
void
nxm_stack_pop_to_nxast(const struct ofpact_stack *stack,
                       struct ofpbuf *openflow)
{
    nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_POP(openflow));
}
1376 /* nxm_execute_stack_push(), nxm_execute_stack_pop(). */
/* Appends the subvalue '*v' to the stack in 'stack'. */
static void
nx_stack_push(struct ofpbuf *stack, union mf_subvalue *v)
{
    ofpbuf_put(stack, v, sizeof *v);
}
1383 static union mf_subvalue
*
1384 nx_stack_pop(struct ofpbuf
*stack
)
1386 union mf_subvalue
*v
= NULL
;
1388 if (ofpbuf_size(stack
)) {
1390 ofpbuf_set_size(stack
, ofpbuf_size(stack
) - sizeof *v
);
1391 v
= (union mf_subvalue
*) ofpbuf_tail(stack
);
1398 nxm_execute_stack_push(const struct ofpact_stack
*push
,
1399 const struct flow
*flow
, struct flow_wildcards
*wc
,
1400 struct ofpbuf
*stack
)
1402 union mf_subvalue mask_value
;
1403 union mf_subvalue dst_value
;
1405 memset(&mask_value
, 0xff, sizeof mask_value
);
1406 mf_write_subfield_flow(&push
->subfield
, &mask_value
, &wc
->masks
);
1408 mf_read_subfield(&push
->subfield
, flow
, &dst_value
);
1409 nx_stack_push(stack
, &dst_value
);
1413 nxm_execute_stack_pop(const struct ofpact_stack
*pop
,
1414 struct flow
*flow
, struct flow_wildcards
*wc
,
1415 struct ofpbuf
*stack
)
1417 union mf_subvalue
*src_value
;
1419 src_value
= nx_stack_pop(stack
);
1421 /* Only pop if stack is not empty. Otherwise, give warning. */
1423 union mf_subvalue mask_value
;
1425 memset(&mask_value
, 0xff, sizeof mask_value
);
1426 mf_write_subfield_flow(&pop
->subfield
, &mask_value
, &wc
->masks
);
1427 mf_write_subfield_flow(&pop
->subfield
, src_value
, flow
);
1429 if (!VLOG_DROP_WARN(&rl
)) {
1430 char *flow_str
= flow_to_string(flow
);
1431 VLOG_WARN_RL(&rl
, "Failed to pop from an empty stack. On flow \n"