2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
21 #include <netinet/icmp6.h>
23 #include "classifier.h"
24 #include "dynamic-string.h"
25 #include "meta-flow.h"
26 #include "ofp-actions.h"
27 #include "ofp-errors.h"
30 #include "openflow/nicira-ext.h"
32 #include "unaligned.h"
36 VLOG_DEFINE_THIS_MODULE(nx_match
);
38 /* Rate limit for nx_match parse errors. These always indicate a bug in the
39 * peer and so there's not much point in showing a lot of them. */
40 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
42 /* Returns the width of the data for a field with the given 'header', in
45 nxm_field_bytes(uint32_t header
)
47 unsigned int length
= NXM_LENGTH(header
);
48 return NXM_HASMASK(header
) ? length
/ 2 : length
;
51 /* Returns the width of the data for a field with the given 'header', in
54 nxm_field_bits(uint32_t header
)
56 return nxm_field_bytes(header
) * 8;
59 /* nx_pull_match() and helpers. */
62 nx_entry_ok(const void *p
, unsigned int match_len
)
64 unsigned int payload_len
;
70 VLOG_DBG_RL(&rl
, "nx_match ends with partial (%u-byte) nxm_header",
75 memcpy(&header_be
, p
, 4);
76 header
= ntohl(header_be
);
78 payload_len
= NXM_LENGTH(header
);
80 VLOG_DBG_RL(&rl
, "nxm_entry %08"PRIx32
" has invalid payload "
84 if (match_len
< payload_len
+ 4) {
85 VLOG_DBG_RL(&rl
, "%"PRIu32
"-byte nxm_entry but only "
86 "%u bytes left in nx_match", payload_len
+ 4, match_len
);
93 /* Given NXM/OXM value 'value' and mask 'mask', each 'width' bytes long, checks
94 * for any 1-bit in the value where there is a 0-bit in the mask. Returns 0 if
95 * none, otherwise an error code. */
97 check_mask_consistency(const uint8_t *p
, const struct mf_field
*mf
)
99 unsigned int width
= mf
->n_bytes
;
100 const uint8_t *value
= p
+ 4;
101 const uint8_t *mask
= p
+ 4 + width
;
104 for (i
= 0; i
< width
; i
++) {
105 if (value
[i
] & ~mask
[i
]) {
106 if (!VLOG_DROP_WARN(&rl
)) {
107 char *s
= nx_match_to_string(p
, width
* 2 + 4);
108 VLOG_WARN_RL(&rl
, "Rejecting NXM/OXM entry %s with 1-bits in "
109 "value for bits wildcarded by the mask.", s
);
112 return OFPERR_OFPBMC_BAD_WILDCARDS
;
119 nx_pull_raw(const uint8_t *p
, unsigned int match_len
, bool strict
,
120 struct match
*match
, ovs_be64
*cookie
, ovs_be64
*cookie_mask
)
124 ovs_assert((cookie
!= NULL
) == (cookie_mask
!= NULL
));
126 match_init_catchall(match
);
128 *cookie
= *cookie_mask
= htonll(0);
135 (header
= nx_entry_ok(p
, match_len
)) != 0;
136 p
+= 4 + NXM_LENGTH(header
), match_len
-= 4 + NXM_LENGTH(header
)) {
137 const struct mf_field
*mf
;
140 mf
= mf_from_nxm_header(header
);
143 error
= OFPERR_OFPBMC_BAD_FIELD
;
147 } else if (!mf_are_prereqs_ok(mf
, &match
->flow
)) {
148 error
= OFPERR_OFPBMC_BAD_PREREQ
;
149 } else if (!mf_is_all_wild(mf
, &match
->wc
)) {
150 error
= OFPERR_OFPBMC_DUP_FIELD
;
152 unsigned int width
= mf
->n_bytes
;
153 union mf_value value
;
155 memcpy(&value
, p
+ 4, width
);
156 if (!mf_is_value_valid(mf
, &value
)) {
157 error
= OFPERR_OFPBMC_BAD_VALUE
;
158 } else if (!NXM_HASMASK(header
)) {
160 mf_set_value(mf
, &value
, match
);
164 memcpy(&mask
, p
+ 4 + width
, width
);
165 if (!mf_is_mask_valid(mf
, &mask
)) {
166 error
= OFPERR_OFPBMC_BAD_MASK
;
168 error
= check_mask_consistency(p
, mf
);
170 mf_set(mf
, &value
, &mask
, match
);
176 /* Check if the match is for a cookie rather than a classifier rule. */
177 if ((header
== NXM_NX_COOKIE
|| header
== NXM_NX_COOKIE_W
) && cookie
) {
179 error
= OFPERR_OFPBMC_DUP_FIELD
;
181 unsigned int width
= sizeof *cookie
;
183 memcpy(cookie
, p
+ 4, width
);
184 if (NXM_HASMASK(header
)) {
185 memcpy(cookie_mask
, p
+ 4 + width
, width
);
187 *cookie_mask
= OVS_BE64_MAX
;
194 VLOG_DBG_RL(&rl
, "bad nxm_entry %#08"PRIx32
" (vendor=%"PRIu32
", "
195 "field=%"PRIu32
", hasmask=%"PRIu32
", len=%"PRIu32
"), "
197 NXM_VENDOR(header
), NXM_FIELD(header
),
198 NXM_HASMASK(header
), NXM_LENGTH(header
),
199 ofperr_to_string(error
));
204 return match_len
? OFPERR_OFPBMC_BAD_LEN
: 0;
208 nx_pull_match__(struct ofpbuf
*b
, unsigned int match_len
, bool strict
,
210 ovs_be64
*cookie
, ovs_be64
*cookie_mask
)
215 p
= ofpbuf_try_pull(b
, ROUND_UP(match_len
, 8));
217 VLOG_DBG_RL(&rl
, "nx_match length %u, rounded up to a "
218 "multiple of 8, is longer than space in message (max "
219 "length %"PRIu32
")", match_len
, ofpbuf_size(b
));
220 return OFPERR_OFPBMC_BAD_LEN
;
224 return nx_pull_raw(p
, match_len
, strict
, match
, cookie
, cookie_mask
);
227 /* Parses the nx_match formatted match description in 'b' with length
228 * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
229 * are valid pointers, then stores the cookie and mask in them if 'b' contains
230 * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
232 * Fails with an error upon encountering an unknown NXM header.
234 * Returns 0 if successful, otherwise an OpenFlow error code. */
236 nx_pull_match(struct ofpbuf
*b
, unsigned int match_len
, struct match
*match
,
237 ovs_be64
*cookie
, ovs_be64
*cookie_mask
)
239 return nx_pull_match__(b
, match_len
, true, match
, cookie
, cookie_mask
);
242 /* Behaves the same as nx_pull_match(), but skips over unknown NXM headers,
243 * instead of failing with an error. */
245 nx_pull_match_loose(struct ofpbuf
*b
, unsigned int match_len
,
247 ovs_be64
*cookie
, ovs_be64
*cookie_mask
)
249 return nx_pull_match__(b
, match_len
, false, match
, cookie
, cookie_mask
);
253 oxm_pull_match__(struct ofpbuf
*b
, bool strict
, struct match
*match
)
255 struct ofp11_match_header
*omh
= ofpbuf_data(b
);
259 if (ofpbuf_size(b
) < sizeof *omh
) {
260 return OFPERR_OFPBMC_BAD_LEN
;
263 match_len
= ntohs(omh
->length
);
264 if (match_len
< sizeof *omh
) {
265 return OFPERR_OFPBMC_BAD_LEN
;
268 if (omh
->type
!= htons(OFPMT_OXM
)) {
269 return OFPERR_OFPBMC_BAD_TYPE
;
272 p
= ofpbuf_try_pull(b
, ROUND_UP(match_len
, 8));
274 VLOG_DBG_RL(&rl
, "oxm length %u, rounded up to a "
275 "multiple of 8, is longer than space in message (max "
276 "length %"PRIu32
")", match_len
, ofpbuf_size(b
));
277 return OFPERR_OFPBMC_BAD_LEN
;
280 return nx_pull_raw(p
+ sizeof *omh
, match_len
- sizeof *omh
,
281 strict
, match
, NULL
, NULL
);
284 /* Parses the oxm formatted match description preceded by a struct
285 * ofp11_match_header in 'b'. Stores the result in 'match'.
287 * Fails with an error when encountering unknown OXM headers.
289 * Returns 0 if successful, otherwise an OpenFlow error code. */
291 oxm_pull_match(struct ofpbuf
*b
, struct match
*match
)
293 return oxm_pull_match__(b
, true, match
);
296 /* Behaves the same as oxm_pull_match() with one exception. Skips over unknown
297 * OXM headers instead of failing with an error when they are encountered. */
299 oxm_pull_match_loose(struct ofpbuf
*b
, struct match
*match
)
301 return oxm_pull_match__(b
, false, match
);
304 /* nx_put_match() and helpers.
306 * 'put' functions whose names end in 'w' add a wildcarded field.
307 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
308 * Other 'put' functions add exact-match fields.
312 nxm_put_header(struct ofpbuf
*b
, uint32_t header
)
314 ovs_be32 n_header
= htonl(header
);
315 ofpbuf_put(b
, &n_header
, sizeof n_header
);
319 nxm_put_8(struct ofpbuf
*b
, uint32_t header
, uint8_t value
)
321 nxm_put_header(b
, header
);
322 ofpbuf_put(b
, &value
, sizeof value
);
/* Appends a possibly-wildcarded 8-bit field entry to 'b': nothing for an
 * all-zero mask, an exact-match entry for an all-ones mask, otherwise a
 * masked ("_W") entry carrying both value and mask. */
static void
nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask)
{
    switch (mask) {
    case 0:
        break;

    case UINT8_MAX:
        nxm_put_8(b, header, value);
        break;

    default:
        nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
        ofpbuf_put(b, &value, sizeof value);
        ofpbuf_put(b, &mask, sizeof mask);
        break;
    }
}
344 nxm_put_16(struct ofpbuf
*b
, uint32_t header
, ovs_be16 value
)
346 nxm_put_header(b
, header
);
347 ofpbuf_put(b
, &value
, sizeof value
);
351 nxm_put_16w(struct ofpbuf
*b
, uint32_t header
, ovs_be16 value
, ovs_be16 mask
)
353 nxm_put_header(b
, header
);
354 ofpbuf_put(b
, &value
, sizeof value
);
355 ofpbuf_put(b
, &mask
, sizeof mask
);
359 nxm_put_16m(struct ofpbuf
*b
, uint32_t header
, ovs_be16 value
, ovs_be16 mask
)
366 nxm_put_16(b
, header
, value
);
370 nxm_put_16w(b
, NXM_MAKE_WILD_HEADER(header
), value
, mask
);
376 nxm_put_32(struct ofpbuf
*b
, uint32_t header
, ovs_be32 value
)
378 nxm_put_header(b
, header
);
379 ofpbuf_put(b
, &value
, sizeof value
);
383 nxm_put_32w(struct ofpbuf
*b
, uint32_t header
, ovs_be32 value
, ovs_be32 mask
)
385 nxm_put_header(b
, header
);
386 ofpbuf_put(b
, &value
, sizeof value
);
387 ofpbuf_put(b
, &mask
, sizeof mask
);
391 nxm_put_32m(struct ofpbuf
*b
, uint32_t header
, ovs_be32 value
, ovs_be32 mask
)
398 nxm_put_32(b
, header
, value
);
402 nxm_put_32w(b
, NXM_MAKE_WILD_HEADER(header
), value
, mask
);
408 nxm_put_64(struct ofpbuf
*b
, uint32_t header
, ovs_be64 value
)
410 nxm_put_header(b
, header
);
411 ofpbuf_put(b
, &value
, sizeof value
);
415 nxm_put_64w(struct ofpbuf
*b
, uint32_t header
, ovs_be64 value
, ovs_be64 mask
)
417 nxm_put_header(b
, header
);
418 ofpbuf_put(b
, &value
, sizeof value
);
419 ofpbuf_put(b
, &mask
, sizeof mask
);
423 nxm_put_64m(struct ofpbuf
*b
, uint32_t header
, ovs_be64 value
, ovs_be64 mask
)
430 nxm_put_64(b
, header
, value
);
434 nxm_put_64w(b
, NXM_MAKE_WILD_HEADER(header
), value
, mask
);
440 nxm_put_eth(struct ofpbuf
*b
, uint32_t header
,
441 const uint8_t value
[ETH_ADDR_LEN
])
443 nxm_put_header(b
, header
);
444 ofpbuf_put(b
, value
, ETH_ADDR_LEN
);
448 nxm_put_eth_masked(struct ofpbuf
*b
, uint32_t header
,
449 const uint8_t value
[ETH_ADDR_LEN
],
450 const uint8_t mask
[ETH_ADDR_LEN
])
452 if (!eth_addr_is_zero(mask
)) {
453 if (eth_mask_is_exact(mask
)) {
454 nxm_put_eth(b
, header
, value
);
456 nxm_put_header(b
, NXM_MAKE_WILD_HEADER(header
));
457 ofpbuf_put(b
, value
, ETH_ADDR_LEN
);
458 ofpbuf_put(b
, mask
, ETH_ADDR_LEN
);
464 nxm_put_ipv6(struct ofpbuf
*b
, uint32_t header
,
465 const struct in6_addr
*value
, const struct in6_addr
*mask
)
467 if (ipv6_mask_is_any(mask
)) {
469 } else if (ipv6_mask_is_exact(mask
)) {
470 nxm_put_header(b
, header
);
471 ofpbuf_put(b
, value
, sizeof *value
);
473 nxm_put_header(b
, NXM_MAKE_WILD_HEADER(header
));
474 ofpbuf_put(b
, value
, sizeof *value
);
475 ofpbuf_put(b
, mask
, sizeof *mask
);
480 nxm_put_frag(struct ofpbuf
*b
, const struct match
*match
, enum ofp_version oxm
)
482 uint32_t header
= mf_oxm_header(MFF_IP_FRAG
, oxm
);
483 uint8_t nw_frag
= match
->flow
.nw_frag
;
484 uint8_t nw_frag_mask
= match
->wc
.masks
.nw_frag
;
486 switch (nw_frag_mask
) {
490 case FLOW_NW_FRAG_MASK
:
491 nxm_put_8(b
, header
, nw_frag
);
495 nxm_put_8m(b
, header
, nw_frag
, nw_frag_mask
& FLOW_NW_FRAG_MASK
);
500 /* Appends to 'b' a set of OXM or NXM matches for the IPv4 or IPv6 fields in
503 nxm_put_ip(struct ofpbuf
*b
, const struct match
*match
, enum ofp_version oxm
)
505 const struct flow
*flow
= &match
->flow
;
507 if (flow
->dl_type
== htons(ETH_TYPE_IP
)) {
508 nxm_put_32m(b
, mf_oxm_header(MFF_IPV4_SRC
, oxm
),
509 flow
->nw_src
, match
->wc
.masks
.nw_src
);
510 nxm_put_32m(b
, mf_oxm_header(MFF_IPV4_DST
, oxm
),
511 flow
->nw_dst
, match
->wc
.masks
.nw_dst
);
513 nxm_put_ipv6(b
, mf_oxm_header(MFF_IPV6_SRC
, oxm
),
514 &flow
->ipv6_src
, &match
->wc
.masks
.ipv6_src
);
515 nxm_put_ipv6(b
, mf_oxm_header(MFF_IPV6_DST
, oxm
),
516 &flow
->ipv6_dst
, &match
->wc
.masks
.ipv6_dst
);
519 nxm_put_frag(b
, match
, oxm
);
521 if (match
->wc
.masks
.nw_tos
& IP_DSCP_MASK
) {
523 nxm_put_8(b
, mf_oxm_header(MFF_IP_DSCP_SHIFTED
, oxm
),
526 nxm_put_8(b
, mf_oxm_header(MFF_IP_DSCP
, oxm
),
527 flow
->nw_tos
& IP_DSCP_MASK
);
531 if (match
->wc
.masks
.nw_tos
& IP_ECN_MASK
) {
532 nxm_put_8(b
, mf_oxm_header(MFF_IP_ECN
, oxm
),
533 flow
->nw_tos
& IP_ECN_MASK
);
536 if (!oxm
&& match
->wc
.masks
.nw_ttl
) {
537 nxm_put_8(b
, mf_oxm_header(MFF_IP_TTL
, oxm
), flow
->nw_ttl
);
540 nxm_put_32m(b
, mf_oxm_header(MFF_IPV6_LABEL
, oxm
),
541 flow
->ipv6_label
, match
->wc
.masks
.ipv6_label
);
543 if (match
->wc
.masks
.nw_proto
) {
544 nxm_put_8(b
, mf_oxm_header(MFF_IP_PROTO
, oxm
), flow
->nw_proto
);
546 if (flow
->nw_proto
== IPPROTO_TCP
) {
547 nxm_put_16m(b
, mf_oxm_header(MFF_TCP_SRC
, oxm
),
548 flow
->tp_src
, match
->wc
.masks
.tp_src
);
549 nxm_put_16m(b
, mf_oxm_header(MFF_TCP_DST
, oxm
),
550 flow
->tp_dst
, match
->wc
.masks
.tp_dst
);
551 nxm_put_16m(b
, mf_oxm_header(MFF_TCP_FLAGS
, oxm
),
552 flow
->tcp_flags
, match
->wc
.masks
.tcp_flags
);
553 } else if (flow
->nw_proto
== IPPROTO_UDP
) {
554 nxm_put_16m(b
, mf_oxm_header(MFF_UDP_SRC
, oxm
),
555 flow
->tp_src
, match
->wc
.masks
.tp_src
);
556 nxm_put_16m(b
, mf_oxm_header(MFF_UDP_DST
, oxm
),
557 flow
->tp_dst
, match
->wc
.masks
.tp_dst
);
558 } else if (flow
->nw_proto
== IPPROTO_SCTP
) {
559 nxm_put_16m(b
, mf_oxm_header(MFF_SCTP_SRC
, oxm
), flow
->tp_src
,
560 match
->wc
.masks
.tp_src
);
561 nxm_put_16m(b
, mf_oxm_header(MFF_SCTP_DST
, oxm
), flow
->tp_dst
,
562 match
->wc
.masks
.tp_dst
);
563 } else if (is_icmpv4(flow
)) {
564 if (match
->wc
.masks
.tp_src
) {
565 nxm_put_8(b
, mf_oxm_header(MFF_ICMPV4_TYPE
, oxm
),
566 ntohs(flow
->tp_src
));
568 if (match
->wc
.masks
.tp_dst
) {
569 nxm_put_8(b
, mf_oxm_header(MFF_ICMPV4_CODE
, oxm
),
570 ntohs(flow
->tp_dst
));
572 } else if (is_icmpv6(flow
)) {
573 if (match
->wc
.masks
.tp_src
) {
574 nxm_put_8(b
, mf_oxm_header(MFF_ICMPV6_TYPE
, oxm
),
575 ntohs(flow
->tp_src
));
577 if (match
->wc
.masks
.tp_dst
) {
578 nxm_put_8(b
, mf_oxm_header(MFF_ICMPV6_CODE
, oxm
),
579 ntohs(flow
->tp_dst
));
581 if (flow
->tp_src
== htons(ND_NEIGHBOR_SOLICIT
) ||
582 flow
->tp_src
== htons(ND_NEIGHBOR_ADVERT
)) {
583 nxm_put_ipv6(b
, mf_oxm_header(MFF_ND_TARGET
, oxm
),
584 &flow
->nd_target
, &match
->wc
.masks
.nd_target
);
585 if (flow
->tp_src
== htons(ND_NEIGHBOR_SOLICIT
)) {
586 nxm_put_eth_masked(b
, mf_oxm_header(MFF_ND_SLL
, oxm
),
587 flow
->arp_sha
, match
->wc
.masks
.arp_sha
);
589 if (flow
->tp_src
== htons(ND_NEIGHBOR_ADVERT
)) {
590 nxm_put_eth_masked(b
, mf_oxm_header(MFF_ND_TLL
, oxm
),
591 flow
->arp_tha
, match
->wc
.masks
.arp_tha
);
598 /* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and
599 * Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
600 * Otherwise, 'cookie_mask' should be zero.
602 * Specify 'oxm' as 0 to express the match in NXM format; otherwise, specify
603 * 'oxm' as the OpenFlow version number for the OXM format to use.
605 * This function can cause 'b''s data to be reallocated.
607 * Returns the number of bytes appended to 'b', excluding padding.
609 * If 'match' is a catch-all rule that matches every packet, then this function
610 * appends nothing to 'b' and returns 0. */
612 nx_put_raw(struct ofpbuf
*b
, enum ofp_version oxm
, const struct match
*match
,
613 ovs_be64 cookie
, ovs_be64 cookie_mask
)
615 const struct flow
*flow
= &match
->flow
;
616 const size_t start_len
= ofpbuf_size(b
);
620 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 27);
623 if (match
->wc
.masks
.dp_hash
) {
624 nxm_put_32m(b
, mf_oxm_header(MFF_DP_HASH
, oxm
),
625 htonl(flow
->dp_hash
), htonl(match
->wc
.masks
.dp_hash
));
628 if (match
->wc
.masks
.recirc_id
) {
629 nxm_put_32(b
, mf_oxm_header(MFF_RECIRC_ID
, oxm
),
630 htonl(flow
->recirc_id
));
633 if (match
->wc
.masks
.in_port
.ofp_port
) {
634 ofp_port_t in_port
= flow
->in_port
.ofp_port
;
636 nxm_put_32(b
, mf_oxm_header(MFF_IN_PORT_OXM
, oxm
),
637 ofputil_port_to_ofp11(in_port
));
639 nxm_put_16(b
, mf_oxm_header(MFF_IN_PORT
, oxm
),
640 htons(ofp_to_u16(in_port
)));
645 nxm_put_eth_masked(b
, mf_oxm_header(MFF_ETH_SRC
, oxm
),
646 flow
->dl_src
, match
->wc
.masks
.dl_src
);
647 nxm_put_eth_masked(b
, mf_oxm_header(MFF_ETH_DST
, oxm
),
648 flow
->dl_dst
, match
->wc
.masks
.dl_dst
);
649 nxm_put_16m(b
, mf_oxm_header(MFF_ETH_TYPE
, oxm
),
650 ofputil_dl_type_to_openflow(flow
->dl_type
),
651 match
->wc
.masks
.dl_type
);
655 ovs_be16 VID_CFI_MASK
= htons(VLAN_VID_MASK
| VLAN_CFI
);
656 ovs_be16 vid
= flow
->vlan_tci
& VID_CFI_MASK
;
657 ovs_be16 mask
= match
->wc
.masks
.vlan_tci
& VID_CFI_MASK
;
659 if (mask
== htons(VLAN_VID_MASK
| VLAN_CFI
)) {
660 nxm_put_16(b
, mf_oxm_header(MFF_VLAN_VID
, oxm
), vid
);
662 nxm_put_16m(b
, mf_oxm_header(MFF_VLAN_VID
, oxm
), vid
, mask
);
665 if (vid
&& vlan_tci_to_pcp(match
->wc
.masks
.vlan_tci
)) {
666 nxm_put_8(b
, mf_oxm_header(MFF_VLAN_PCP
, oxm
),
667 vlan_tci_to_pcp(flow
->vlan_tci
));
671 nxm_put_16m(b
, mf_oxm_header(MFF_VLAN_TCI
, oxm
), flow
->vlan_tci
,
672 match
->wc
.masks
.vlan_tci
);
676 if (eth_type_mpls(flow
->dl_type
)) {
677 if (match
->wc
.masks
.mpls_lse
[0] & htonl(MPLS_TC_MASK
)) {
678 nxm_put_8(b
, mf_oxm_header(MFF_MPLS_TC
, oxm
),
679 mpls_lse_to_tc(flow
->mpls_lse
[0]));
682 if (match
->wc
.masks
.mpls_lse
[0] & htonl(MPLS_BOS_MASK
)) {
683 nxm_put_8(b
, mf_oxm_header(MFF_MPLS_BOS
, oxm
),
684 mpls_lse_to_bos(flow
->mpls_lse
[0]));
687 if (match
->wc
.masks
.mpls_lse
[0] & htonl(MPLS_LABEL_MASK
)) {
688 nxm_put_32(b
, mf_oxm_header(MFF_MPLS_LABEL
, oxm
),
689 htonl(mpls_lse_to_label(flow
->mpls_lse
[0])));
694 if (is_ip_any(flow
)) {
695 nxm_put_ip(b
, match
, oxm
);
696 } else if (flow
->dl_type
== htons(ETH_TYPE_ARP
) ||
697 flow
->dl_type
== htons(ETH_TYPE_RARP
)) {
699 if (match
->wc
.masks
.nw_proto
) {
700 nxm_put_16(b
, mf_oxm_header(MFF_ARP_OP
, oxm
),
701 htons(flow
->nw_proto
));
703 nxm_put_32m(b
, mf_oxm_header(MFF_ARP_SPA
, oxm
),
704 flow
->nw_src
, match
->wc
.masks
.nw_src
);
705 nxm_put_32m(b
, mf_oxm_header(MFF_ARP_TPA
, oxm
),
706 flow
->nw_dst
, match
->wc
.masks
.nw_dst
);
707 nxm_put_eth_masked(b
, mf_oxm_header(MFF_ARP_SHA
, oxm
),
708 flow
->arp_sha
, match
->wc
.masks
.arp_sha
);
709 nxm_put_eth_masked(b
, mf_oxm_header(MFF_ARP_THA
, oxm
),
710 flow
->arp_tha
, match
->wc
.masks
.arp_tha
);
714 nxm_put_64m(b
, mf_oxm_header(MFF_TUN_ID
, oxm
),
715 flow
->tunnel
.tun_id
, match
->wc
.masks
.tunnel
.tun_id
);
717 /* Other tunnel metadata. */
718 nxm_put_32m(b
, mf_oxm_header(MFF_TUN_SRC
, oxm
),
719 flow
->tunnel
.ip_src
, match
->wc
.masks
.tunnel
.ip_src
);
720 nxm_put_32m(b
, mf_oxm_header(MFF_TUN_DST
, oxm
),
721 flow
->tunnel
.ip_dst
, match
->wc
.masks
.tunnel
.ip_dst
);
724 if (oxm
< OFP15_VERSION
) {
725 for (i
= 0; i
< FLOW_N_REGS
; i
++) {
726 nxm_put_32m(b
, mf_oxm_header(MFF_REG0
+ i
, oxm
),
727 htonl(flow
->regs
[i
]), htonl(match
->wc
.masks
.regs
[i
]));
730 for (i
= 0; i
< FLOW_N_XREGS
; i
++) {
731 nxm_put_64m(b
, mf_oxm_header(MFF_XREG0
+ i
, oxm
),
732 htonll(flow_get_xreg(flow
, i
)),
733 htonll(flow_get_xreg(&match
->wc
.masks
, i
)));
738 nxm_put_32m(b
, mf_oxm_header(MFF_PKT_MARK
, oxm
), htonl(flow
->pkt_mark
),
739 htonl(match
->wc
.masks
.pkt_mark
));
741 /* OpenFlow 1.1+ Metadata. */
742 nxm_put_64m(b
, mf_oxm_header(MFF_METADATA
, oxm
),
743 flow
->metadata
, match
->wc
.masks
.metadata
);
746 nxm_put_64m(b
, NXM_NX_COOKIE
, cookie
& cookie_mask
, cookie_mask
);
748 match_len
= ofpbuf_size(b
) - start_len
;
752 /* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
753 * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
754 * Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
755 * Otherwise, 'cookie_mask' should be zero.
757 * This function can cause 'b''s data to be reallocated.
759 * Returns the number of bytes appended to 'b', excluding padding. The return
760 * value can be zero if it appended nothing at all to 'b' (which happens if
761 * 'cr' is a catch-all rule that matches every packet). */
763 nx_put_match(struct ofpbuf
*b
, const struct match
*match
,
764 ovs_be64 cookie
, ovs_be64 cookie_mask
)
766 int match_len
= nx_put_raw(b
, 0, match
, cookie
, cookie_mask
);
768 ofpbuf_put_zeros(b
, PAD_SIZE(match_len
, 8));
772 /* Appends to 'b' an struct ofp11_match_header followed by the OXM format that
773 * expresses 'cr', plus enough zero bytes to pad the data appended out to a
776 * OXM differs slightly among versions of OpenFlow. Specify the OpenFlow
777 * version in use as 'version'.
779 * This function can cause 'b''s data to be reallocated.
781 * Returns the number of bytes appended to 'b', excluding the padding. Never
784 oxm_put_match(struct ofpbuf
*b
, const struct match
*match
,
785 enum ofp_version version
)
788 struct ofp11_match_header
*omh
;
789 size_t start_len
= ofpbuf_size(b
);
790 ovs_be64 cookie
= htonll(0), cookie_mask
= htonll(0);
792 ofpbuf_put_uninit(b
, sizeof *omh
);
793 match_len
= (nx_put_raw(b
, version
, match
, cookie
, cookie_mask
)
795 ofpbuf_put_zeros(b
, PAD_SIZE(match_len
, 8));
797 omh
= ofpbuf_at(b
, start_len
, sizeof *omh
);
798 omh
->type
= htons(OFPMT_OXM
);
799 omh
->length
= htons(match_len
);
804 /* nx_match_to_string() and helpers. */
806 static void format_nxm_field_name(struct ds
*, uint32_t header
);
809 nx_match_to_string(const uint8_t *p
, unsigned int match_len
)
815 return xstrdup("<any>");
819 while ((header
= nx_entry_ok(p
, match_len
)) != 0) {
820 unsigned int length
= NXM_LENGTH(header
);
821 unsigned int value_len
= nxm_field_bytes(header
);
822 const uint8_t *value
= p
+ 4;
823 const uint8_t *mask
= value
+ value_len
;
827 ds_put_cstr(&s
, ", ");
830 format_nxm_field_name(&s
, header
);
831 ds_put_char(&s
, '(');
833 for (i
= 0; i
< value_len
; i
++) {
834 ds_put_format(&s
, "%02x", value
[i
]);
836 if (NXM_HASMASK(header
)) {
837 ds_put_char(&s
, '/');
838 for (i
= 0; i
< value_len
; i
++) {
839 ds_put_format(&s
, "%02x", mask
[i
]);
842 ds_put_char(&s
, ')');
845 match_len
-= 4 + length
;
850 ds_put_cstr(&s
, ", ");
853 ds_put_format(&s
, "<%u invalid bytes>", match_len
);
856 return ds_steal_cstr(&s
);
860 oxm_match_to_string(const struct ofpbuf
*p
, unsigned int match_len
)
862 const struct ofp11_match_header
*omh
= ofpbuf_data(p
);
868 if (match_len
< sizeof *omh
) {
869 ds_put_format(&s
, "<match too short: %u>", match_len
);
873 if (omh
->type
!= htons(OFPMT_OXM
)) {
874 ds_put_format(&s
, "<bad match type field: %u>", ntohs(omh
->type
));
878 match_len_
= ntohs(omh
->length
);
879 if (match_len_
< sizeof *omh
) {
880 ds_put_format(&s
, "<match length field too short: %u>", match_len_
);
884 if (match_len_
!= match_len
) {
885 ds_put_format(&s
, "<match length field incorrect: %u != %u>",
886 match_len_
, match_len
);
890 return nx_match_to_string(ofpbuf_at(p
, sizeof *omh
, 0),
891 match_len
- sizeof *omh
);
894 return ds_steal_cstr(&s
);
898 format_nxm_field_name(struct ds
*s
, uint32_t header
)
900 const struct mf_field
*mf
= mf_from_nxm_header(header
);
902 ds_put_cstr(s
, IS_OXM_HEADER(header
) ? mf
->oxm_name
: mf
->nxm_name
);
903 if (NXM_HASMASK(header
)) {
904 ds_put_cstr(s
, "_W");
906 } else if (header
== NXM_NX_COOKIE
) {
907 ds_put_cstr(s
, "NXM_NX_COOKIE");
908 } else if (header
== NXM_NX_COOKIE_W
) {
909 ds_put_cstr(s
, "NXM_NX_COOKIE_W");
911 ds_put_format(s
, "%d:%d", NXM_VENDOR(header
), NXM_FIELD(header
));
916 parse_nxm_field_name(const char *name
, int name_len
)
921 /* Check whether it's a field name. */
922 wild
= name_len
> 2 && !memcmp(&name
[name_len
- 2], "_W", 2);
927 for (i
= 0; i
< MFF_N_IDS
; i
++) {
928 const struct mf_field
*mf
= mf_from_id(i
);
932 !strncmp(mf
->nxm_name
, name
, name_len
) &&
933 mf
->nxm_name
[name_len
] == '\0') {
934 header
= mf
->nxm_header
;
935 } else if (mf
->oxm_name
&&
936 !strncmp(mf
->oxm_name
, name
, name_len
) &&
937 mf
->oxm_name
[name_len
] == '\0') {
938 header
= mf
->oxm_header
;
945 } else if (mf
->maskable
!= MFM_NONE
) {
946 return NXM_MAKE_WILD_HEADER(header
);
950 if (!strncmp("NXM_NX_COOKIE", name
, name_len
) &&
951 (name_len
== strlen("NXM_NX_COOKIE"))) {
953 return NXM_NX_COOKIE
;
955 return NXM_NX_COOKIE_W
;
959 /* Check whether it's a 32-bit field header value as hex.
960 * (This isn't ordinarily useful except for testing error behavior.) */
962 uint32_t header
= hexits_value(name
, name_len
, NULL
);
963 if (header
!= UINT_MAX
) {
/* nx_match_from_string(). */

/* Parses the string form of an nx_match (as produced by
 * nx_match_to_string()) from 's' and appends the binary entries to 'b'.
 * Dies with ovs_fatal() on any syntax error.  Returns the number of bytes
 * appended to 'b'. */
static int
nx_match_from_string_raw(const char *s, struct ofpbuf *b)
{
    const char *full_s = s;
    const size_t start_len = ofpbuf_size(b);

    if (!strcmp(s, "<any>")) {
        /* Ensure that 'ofpbuf_data(b)' isn't actually null. */
        ofpbuf_prealloc_tailroom(b, 1);
        return 0;
    }

    for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
        const char *name;
        uint32_t header;
        int name_len;
        size_t n;

        name = s;
        name_len = strcspn(s, "(");
        if (s[name_len] != '(') {
            ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
        }

        header = parse_nxm_field_name(name, name_len);
        if (!header) {
            ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
        }

        s += name_len + 1;

        nxm_put_header(b, header);
        s = ofpbuf_put_hex(b, s, &n);
        if (n != nxm_field_bytes(header)) {
            ovs_fatal(0, "%.2s: hex digits expected", s);
        }
        if (NXM_HASMASK(header)) {
            s += strspn(s, " ");
            if (*s != '/') {
                ovs_fatal(0, "%s: missing / in masked field %.*s",
                          full_s, name_len, name);
            }
            s = ofpbuf_put_hex(b, s + 1, &n);
            if (n != nxm_field_bytes(header)) {
                ovs_fatal(0, "%.2s: hex digits expected", s);
            }
        }

        s += strspn(s, " ");
        if (*s != ')') {
            ovs_fatal(0, "%s: missing ) following field %.*s",
                      full_s, name_len, name);
        }
        s++;
    }

    return ofpbuf_size(b) - start_len;
}
/* Parses the string form of an nx_match from 's', appends the binary form
 * to 'b' padded out to a multiple of 8 bytes, and returns the unpadded
 * length. */
int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    int match_len = nx_match_from_string_raw(s, b);
    ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
    return match_len;
}
1041 oxm_match_from_string(const char *s
, struct ofpbuf
*b
)
1044 struct ofp11_match_header
*omh
;
1045 size_t start_len
= ofpbuf_size(b
);
1047 ofpbuf_put_uninit(b
, sizeof *omh
);
1048 match_len
= nx_match_from_string_raw(s
, b
) + sizeof *omh
;
1049 ofpbuf_put_zeros(b
, PAD_SIZE(match_len
, 8));
1051 omh
= ofpbuf_at(b
, start_len
, sizeof *omh
);
1052 omh
->type
= htons(OFPMT_OXM
);
1053 omh
->length
= htons(match_len
);
1058 /* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into
1061 * Returns NULL if successful, otherwise a malloc()'d string describing the
1062 * error. The caller is responsible for freeing the returned string. */
1063 char * WARN_UNUSED_RESULT
1064 nxm_parse_reg_move(struct ofpact_reg_move
*move
, const char *s
)
1066 const char *full_s
= s
;
1069 error
= mf_parse_subfield__(&move
->src
, &s
);
1073 if (strncmp(s
, "->", 2)) {
1074 return xasprintf("%s: missing `->' following source", full_s
);
1077 error
= mf_parse_subfield(&move
->dst
, s
);
1082 if (move
->src
.n_bits
!= move
->dst
.n_bits
) {
1083 return xasprintf("%s: source field is %d bits wide but destination is "
1084 "%d bits wide", full_s
,
1085 move
->src
.n_bits
, move
->dst
.n_bits
);
1090 /* Parses 's' as a "load" action, in the form described in ovs-ofctl(8), into
1093 * Returns NULL if successful, otherwise a malloc()'d string describing the
1094 * error. The caller is responsible for freeing the returned string. */
1095 char * WARN_UNUSED_RESULT
1096 nxm_parse_reg_load(struct ofpact_reg_load
*load
, const char *s
)
1098 const char *full_s
= s
;
1099 uint64_t value
= strtoull(s
, (char **) &s
, 0);
1102 if (strncmp(s
, "->", 2)) {
1103 return xasprintf("%s: missing `->' following value", full_s
);
1106 error
= mf_parse_subfield(&load
->dst
, s
);
1111 if (load
->dst
.n_bits
< 64 && (value
>> load
->dst
.n_bits
) != 0) {
1112 return xasprintf("%s: value %"PRIu64
" does not fit into %d bits",
1113 full_s
, value
, load
->dst
.n_bits
);
1116 load
->subvalue
.be64
[0] = htonll(0);
1117 load
->subvalue
.be64
[1] = htonll(value
);
1121 /* nxm_format_reg_move(), nxm_format_reg_load(). */
1124 nxm_format_reg_move(const struct ofpact_reg_move
*move
, struct ds
*s
)
1126 ds_put_format(s
, "move:");
1127 mf_format_subfield(&move
->src
, s
);
1128 ds_put_cstr(s
, "->");
1129 mf_format_subfield(&move
->dst
, s
);
1133 nxm_format_reg_load(const struct ofpact_reg_load
*load
, struct ds
*s
)
1135 ds_put_cstr(s
, "load:");
1136 mf_format_subvalue(&load
->subvalue
, s
);
1137 ds_put_cstr(s
, "->");
1138 mf_format_subfield(&load
->dst
, s
);
1142 nxm_reg_move_check(const struct ofpact_reg_move
*move
, const struct flow
*flow
)
1146 error
= mf_check_src(&move
->src
, flow
);
1151 return mf_check_dst(&move
->dst
, NULL
);
1155 nxm_reg_load_check(const struct ofpact_reg_load
*load
, const struct flow
*flow
)
1157 return mf_check_dst(&load
->dst
, flow
);
1161 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
1164 nxm_execute_reg_move(const struct ofpact_reg_move
*move
,
1165 struct flow
*flow
, struct flow_wildcards
*wc
)
1167 union mf_value src_value
;
1168 union mf_value dst_value
;
1170 mf_mask_field_and_prereqs(move
->dst
.field
, &wc
->masks
);
1171 mf_mask_field_and_prereqs(move
->src
.field
, &wc
->masks
);
1173 mf_get_value(move
->dst
.field
, flow
, &dst_value
);
1174 mf_get_value(move
->src
.field
, flow
, &src_value
);
1175 bitwise_copy(&src_value
, move
->src
.field
->n_bytes
, move
->src
.ofs
,
1176 &dst_value
, move
->dst
.field
->n_bytes
, move
->dst
.ofs
,
1178 mf_set_flow_value(move
->dst
.field
, &dst_value
, flow
);
1182 nxm_execute_reg_load(const struct ofpact_reg_load
*load
, struct flow
*flow
,
1183 struct flow_wildcards
*wc
)
1185 /* Since at the datapath interface we do not have set actions for
1186 * individual fields, but larger sets of fields for a given protocol
1187 * layer, the set action will in practice only ever apply to exactly
1188 * matched flows for the given protocol layer. For example, if the
1189 * reg_load changes the IP TTL, the corresponding datapath action will
1190 * rewrite also the IP addresses and TOS byte. Since these other field
1191 * values may not be explicitly set, they depend on the incoming flow field
1192 * values, and are hence all of them are set in the wildcards masks, when
1193 * the action is committed to the datapath. For the rare case, where the
1194 * reg_load action does not actually change the value, and no other flow
1195 * field values are set (or loaded), the datapath action is skipped, and
1196 * no mask bits are set. Such a datapath flow should, however, be
1197 * dependent on the specific field value, so the corresponding wildcard
1198 * mask bits must be set, lest the datapath flow be applied to packets
1199 * containing some other value in the field and the field value remain
1200 * unchanged regardless of the incoming value.
1202 * We set the masks here for the whole fields, and their prerequisities.
1203 * Even if only the lower byte of a TCP destination port is set,
1204 * we set the mask for the whole field, and also the ip_proto in the IP
1205 * header, so that the kernel flow would not be applied on, e.g., a UDP
1206 * packet, or any other IP protocol in addition to TCP packets.
1208 mf_mask_field_and_prereqs(load
->dst
.field
, &wc
->masks
);
1209 mf_write_subfield_flow(&load
->dst
, &load
->subvalue
, flow
);
1213 nxm_reg_load(const struct mf_subfield
*dst
, uint64_t src_data
,
1214 struct flow
*flow
, struct flow_wildcards
*wc
)
1216 union mf_subvalue src_subvalue
;
1217 union mf_subvalue mask_value
;
1218 ovs_be64 src_data_be
= htonll(src_data
);
1220 memset(&mask_value
, 0xff, sizeof mask_value
);
1221 mf_write_subfield_flow(dst
, &mask_value
, &wc
->masks
);
1223 bitwise_copy(&src_data_be
, sizeof src_data_be
, 0,
1224 &src_subvalue
, sizeof src_subvalue
, 0,
1225 sizeof src_data_be
* 8);
1226 mf_write_subfield_flow(dst
, &src_subvalue
, flow
);
1229 /* nxm_parse_stack_action, works for both push() and pop(). */
1231 /* Parses 's' as a "push" or "pop" action, in the form described in
1232 * ovs-ofctl(8), into '*stack_action'.
1234 * Returns NULL if successful, otherwise a malloc()'d string describing the
1235 * error. The caller is responsible for freeing the returned string. */
1236 char * WARN_UNUSED_RESULT
1237 nxm_parse_stack_action(struct ofpact_stack
*stack_action
, const char *s
)
1241 error
= mf_parse_subfield__(&stack_action
->subfield
, &s
);
1247 return xasprintf("%s: trailing garbage following push or pop", s
);
1254 nxm_format_stack_push(const struct ofpact_stack
*push
, struct ds
*s
)
1256 ds_put_cstr(s
, "push:");
1257 mf_format_subfield(&push
->subfield
, s
);
1261 nxm_format_stack_pop(const struct ofpact_stack
*pop
, struct ds
*s
)
1263 ds_put_cstr(s
, "pop:");
1264 mf_format_subfield(&pop
->subfield
, s
);
1268 nxm_stack_push_check(const struct ofpact_stack
*push
,
1269 const struct flow
*flow
)
1271 return mf_check_src(&push
->subfield
, flow
);
1275 nxm_stack_pop_check(const struct ofpact_stack
*pop
,
1276 const struct flow
*flow
)
1278 return mf_check_dst(&pop
->subfield
, flow
);
1281 /* nxm_execute_stack_push(), nxm_execute_stack_pop(). */
1283 nx_stack_push(struct ofpbuf
*stack
, union mf_subvalue
*v
)
1285 ofpbuf_put(stack
, v
, sizeof *v
);
1288 static union mf_subvalue
*
1289 nx_stack_pop(struct ofpbuf
*stack
)
1291 union mf_subvalue
*v
= NULL
;
1293 if (ofpbuf_size(stack
)) {
1295 ofpbuf_set_size(stack
, ofpbuf_size(stack
) - sizeof *v
);
1296 v
= (union mf_subvalue
*) ofpbuf_tail(stack
);
1303 nxm_execute_stack_push(const struct ofpact_stack
*push
,
1304 const struct flow
*flow
, struct flow_wildcards
*wc
,
1305 struct ofpbuf
*stack
)
1307 union mf_subvalue mask_value
;
1308 union mf_subvalue dst_value
;
1310 memset(&mask_value
, 0xff, sizeof mask_value
);
1311 mf_write_subfield_flow(&push
->subfield
, &mask_value
, &wc
->masks
);
1313 mf_read_subfield(&push
->subfield
, flow
, &dst_value
);
1314 nx_stack_push(stack
, &dst_value
);
1318 nxm_execute_stack_pop(const struct ofpact_stack
*pop
,
1319 struct flow
*flow
, struct flow_wildcards
*wc
,
1320 struct ofpbuf
*stack
)
1322 union mf_subvalue
*src_value
;
1324 src_value
= nx_stack_pop(stack
);
1326 /* Only pop if stack is not empty. Otherwise, give warning. */
1328 union mf_subvalue mask_value
;
1330 memset(&mask_value
, 0xff, sizeof mask_value
);
1331 mf_write_subfield_flow(&pop
->subfield
, &mask_value
, &wc
->masks
);
1332 mf_write_subfield_flow(&pop
->subfield
, src_value
, flow
);
1334 if (!VLOG_DROP_WARN(&rl
)) {
1335 char *flow_str
= flow_to_string(flow
);
1336 VLOG_WARN_RL(&rl
, "Failed to pop from an empty stack. On flow \n"