2 * Copyright (c) 2010, 2011 Nicira Networks.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
21 #include "classifier.h"
22 #include "dynamic-string.h"
25 #include "openflow/nicira-ext.h"
27 #include "unaligned.h"
30 VLOG_DEFINE_THIS_MODULE(nx_match
);
32 /* Rate limit for nx_match parse errors. These always indicate a bug in the
33 * peer and so there's not much point in showing a lot of them. */
34 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 5);
37 NXM_INVALID
= OFP_MKERR_NICIRA(OFPET_BAD_REQUEST
, NXBRC_NXM_INVALID
),
38 NXM_BAD_TYPE
= OFP_MKERR_NICIRA(OFPET_BAD_REQUEST
, NXBRC_NXM_BAD_TYPE
),
39 NXM_BAD_VALUE
= OFP_MKERR_NICIRA(OFPET_BAD_REQUEST
, NXBRC_NXM_BAD_VALUE
),
40 NXM_BAD_MASK
= OFP_MKERR_NICIRA(OFPET_BAD_REQUEST
, NXBRC_NXM_BAD_MASK
),
41 NXM_BAD_PREREQ
= OFP_MKERR_NICIRA(OFPET_BAD_REQUEST
, NXBRC_NXM_BAD_PREREQ
),
42 NXM_DUP_TYPE
= OFP_MKERR_NICIRA(OFPET_BAD_REQUEST
, NXBRC_NXM_DUP_TYPE
),
43 BAD_ARGUMENT
= OFP_MKERR(OFPET_BAD_ACTION
, OFPBAC_BAD_ARGUMENT
)
46 /* For each NXM_* field, define NFI_NXM_* as consecutive integers starting from
48 enum nxm_field_index
{
49 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \
51 #include "nx-match.def"
56 struct hmap_node hmap_node
;
57 enum nxm_field_index index
; /* NFI_* value. */
58 uint32_t header
; /* NXM_* value. */
59 flow_wildcards_t wildcard
; /* FWW_* bit, if exactly one. */
60 ovs_be16 dl_type
; /* dl_type prerequisite, if nonzero. */
61 uint8_t nw_proto
; /* nw_proto prerequisite, if nonzero. */
62 const char *name
; /* "NXM_*" string. */
63 bool writable
; /* Writable with NXAST_REG_{MOVE,LOAD}? */
66 /* All the known fields. */
67 static struct nxm_field nxm_fields
[N_NXM_FIELDS
] = {
68 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \
69 { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \
70 CONSTANT_HTONS(DL_TYPE), NW_PROTO, "NXM_" #HEADER, WRITABLE },
71 #include "nx-match.def"
74 /* Hash table of 'nxm_fields'. */
75 static struct hmap all_nxm_fields
= HMAP_INITIALIZER(&all_nxm_fields
);
77 /* Possible masks for NXM_OF_ETH_DST_W. */
78 static const uint8_t eth_all_0s
[ETH_ADDR_LEN
]
79 = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
80 static const uint8_t eth_all_1s
[ETH_ADDR_LEN
]
81 = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
82 static const uint8_t eth_mcast_1
[ETH_ADDR_LEN
]
83 = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
84 static const uint8_t eth_mcast_0
[ETH_ADDR_LEN
]
85 = {0xfe, 0xff, 0xff, 0xff, 0xff, 0xff};
90 if (hmap_is_empty(&all_nxm_fields
)) {
93 for (i
= 0; i
< N_NXM_FIELDS
; i
++) {
94 struct nxm_field
*f
= &nxm_fields
[i
];
95 hmap_insert(&all_nxm_fields
, &f
->hmap_node
,
96 hash_int(f
->header
, 0));
99 /* Verify that the header values are unique (duplicate "case" values
100 * cause a compile error). */
102 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \
103 case NXM_##HEADER: break;
104 #include "nx-match.def"
109 static const struct nxm_field
*
110 nxm_field_lookup(uint32_t header
)
116 HMAP_FOR_EACH_WITH_HASH (f
, hmap_node
, hash_int(header
, 0),
118 if (f
->header
== header
) {
/* Returns the width of the data for a field with the given 'header', in
 * bytes.  A "hasmask" entry's payload carries both a value and a mask, so its
 * data width is half of the total payload length encoded in the header. */
static int
nxm_field_bytes(uint32_t header)
{
    unsigned int payload = NXM_LENGTH(header);
    return NXM_HASMASK(header) ? payload / 2 : payload;
}
/* Returns the width of the data for a field with the given 'header', in
 * bits. */
static int
nxm_field_bits(uint32_t header)
{
    return nxm_field_bytes(header) * 8;
}
143 /* nx_pull_match() and helpers. */
146 parse_nx_reg(const struct nxm_field
*f
,
147 struct flow
*flow
, struct flow_wildcards
*wc
,
148 const void *value
, const void *maskp
)
150 int idx
= NXM_NX_REG_IDX(f
->header
);
151 if (wc
->reg_masks
[idx
]) {
154 flow_wildcards_set_reg_mask(wc
, idx
,
155 (NXM_HASMASK(f
->header
)
156 ? ntohl(get_unaligned_be32(maskp
))
158 flow
->regs
[idx
] = ntohl(get_unaligned_be32(value
));
159 flow
->regs
[idx
] &= wc
->reg_masks
[idx
];
165 parse_nxm_entry(struct cls_rule
*rule
, const struct nxm_field
*f
,
166 const void *value
, const void *mask
)
168 struct flow_wildcards
*wc
= &rule
->wc
;
169 struct flow
*flow
= &rule
->flow
;
173 case NFI_NXM_OF_IN_PORT
:
174 flow
->in_port
= ntohs(get_unaligned_be16(value
));
175 if (flow
->in_port
== OFPP_LOCAL
) {
176 flow
->in_port
= ODPP_LOCAL
;
180 /* Ethernet header. */
181 case NFI_NXM_OF_ETH_DST
:
182 if ((wc
->wildcards
& (FWW_DL_DST
| FWW_ETH_MCAST
))
183 != (FWW_DL_DST
| FWW_ETH_MCAST
)) {
186 wc
->wildcards
&= ~(FWW_DL_DST
| FWW_ETH_MCAST
);
187 memcpy(flow
->dl_dst
, value
, ETH_ADDR_LEN
);
190 case NFI_NXM_OF_ETH_DST_W
:
191 if ((wc
->wildcards
& (FWW_DL_DST
| FWW_ETH_MCAST
))
192 != (FWW_DL_DST
| FWW_ETH_MCAST
)) {
194 } else if (eth_addr_equals(mask
, eth_mcast_1
)) {
195 wc
->wildcards
&= ~FWW_ETH_MCAST
;
196 flow
->dl_dst
[0] = *(uint8_t *) value
& 0x01;
197 } else if (eth_addr_equals(mask
, eth_mcast_0
)) {
198 wc
->wildcards
&= ~FWW_DL_DST
;
199 memcpy(flow
->dl_dst
, value
, ETH_ADDR_LEN
);
200 flow
->dl_dst
[0] &= 0xfe;
201 } else if (eth_addr_equals(mask
, eth_all_0s
)) {
203 } else if (eth_addr_equals(mask
, eth_all_1s
)) {
204 wc
->wildcards
&= ~(FWW_DL_DST
| FWW_ETH_MCAST
);
205 memcpy(flow
->dl_dst
, value
, ETH_ADDR_LEN
);
210 case NFI_NXM_OF_ETH_SRC
:
211 memcpy(flow
->dl_src
, value
, ETH_ADDR_LEN
);
213 case NFI_NXM_OF_ETH_TYPE
:
214 flow
->dl_type
= ofputil_dl_type_from_openflow(get_unaligned_be16(value
));
218 case NFI_NXM_OF_VLAN_TCI
:
219 if (wc
->vlan_tci_mask
) {
222 cls_rule_set_dl_tci(rule
, get_unaligned_be16(value
));
225 case NFI_NXM_OF_VLAN_TCI_W
:
226 if (wc
->vlan_tci_mask
) {
229 cls_rule_set_dl_tci_masked(rule
, get_unaligned_be16(value
),
230 get_unaligned_be16(mask
));
235 case NFI_NXM_OF_IP_TOS
:
236 if (*(uint8_t *) value
& 0x03) {
237 return NXM_BAD_VALUE
;
239 flow
->nw_tos
= *(uint8_t *) value
;
242 case NFI_NXM_OF_IP_PROTO
:
243 flow
->nw_proto
= *(uint8_t *) value
;
246 /* IP addresses in IP and ARP headers. */
247 case NFI_NXM_OF_IP_SRC
:
248 case NFI_NXM_OF_ARP_SPA
:
249 if (wc
->nw_src_mask
) {
252 cls_rule_set_nw_src(rule
, get_unaligned_be32(value
));
255 case NFI_NXM_OF_IP_SRC_W
:
256 case NFI_NXM_OF_ARP_SPA_W
:
257 if (wc
->nw_src_mask
) {
260 ovs_be32 ip
= get_unaligned_be32(value
);
261 ovs_be32 netmask
= get_unaligned_be32(mask
);
262 if (!cls_rule_set_nw_src_masked(rule
, ip
, netmask
)) {
267 case NFI_NXM_OF_IP_DST
:
268 case NFI_NXM_OF_ARP_TPA
:
269 if (wc
->nw_dst_mask
) {
272 cls_rule_set_nw_dst(rule
, get_unaligned_be32(value
));
275 case NFI_NXM_OF_IP_DST_W
:
276 case NFI_NXM_OF_ARP_TPA_W
:
277 if (wc
->nw_dst_mask
) {
280 ovs_be32 ip
= get_unaligned_be32(value
);
281 ovs_be32 netmask
= get_unaligned_be32(mask
);
282 if (!cls_rule_set_nw_dst_masked(rule
, ip
, netmask
)) {
289 case NFI_NXM_OF_TCP_SRC
:
290 flow
->tp_src
= get_unaligned_be16(value
);
292 case NFI_NXM_OF_TCP_DST
:
293 flow
->tp_dst
= get_unaligned_be16(value
);
297 case NFI_NXM_OF_UDP_SRC
:
298 flow
->tp_src
= get_unaligned_be16(value
);
300 case NFI_NXM_OF_UDP_DST
:
301 flow
->tp_dst
= get_unaligned_be16(value
);
305 case NFI_NXM_OF_ICMP_TYPE
:
306 flow
->tp_src
= htons(*(uint8_t *) value
);
308 case NFI_NXM_OF_ICMP_CODE
:
309 flow
->tp_dst
= htons(*(uint8_t *) value
);
313 case NFI_NXM_OF_ARP_OP
:
314 if (ntohs(get_unaligned_be16(value
)) > 255) {
315 return NXM_BAD_VALUE
;
317 flow
->nw_proto
= ntohs(get_unaligned_be16(value
));
322 case NFI_NXM_NX_TUN_ID
:
323 if (wc
->tun_id_mask
) {
326 cls_rule_set_tun_id(rule
, get_unaligned_be64(value
));
329 case NFI_NXM_NX_TUN_ID_W
:
330 if (wc
->tun_id_mask
) {
333 ovs_be64 tun_id
= get_unaligned_be64(value
);
334 ovs_be64 tun_mask
= get_unaligned_be64(mask
);
335 cls_rule_set_tun_id_masked(rule
, tun_id
, tun_mask
);
340 case NFI_NXM_NX_REG0
:
341 case NFI_NXM_NX_REG0_W
:
343 case NFI_NXM_NX_REG1
:
344 case NFI_NXM_NX_REG1_W
:
347 case NFI_NXM_NX_REG2
:
348 case NFI_NXM_NX_REG2_W
:
351 case NFI_NXM_NX_REG3
:
352 case NFI_NXM_NX_REG3_W
:
357 return parse_nx_reg(f
, flow
, wc
, value
, mask
);
366 nxm_prereqs_ok(const struct nxm_field
*field
, const struct flow
*flow
)
368 return (!field
->dl_type
369 || (field
->dl_type
== flow
->dl_type
370 && (!field
->nw_proto
|| field
->nw_proto
== flow
->nw_proto
)));
374 nx_entry_ok(const void *p
, unsigned int match_len
)
376 unsigned int payload_len
;
382 VLOG_DBG_RL(&rl
, "nx_match ends with partial nxm_header");
386 memcpy(&header_be
, p
, 4);
387 header
= ntohl(header_be
);
389 payload_len
= NXM_LENGTH(header
);
391 VLOG_DBG_RL(&rl
, "nxm_entry %08"PRIx32
" has invalid payload "
395 if (match_len
< payload_len
+ 4) {
396 VLOG_DBG_RL(&rl
, "%"PRIu32
"-byte nxm_entry but only "
397 "%u bytes left in nx_match", payload_len
+ 4, match_len
);
405 nx_pull_match(struct ofpbuf
*b
, unsigned int match_len
, uint16_t priority
,
406 struct cls_rule
*rule
)
411 p
= ofpbuf_try_pull(b
, ROUND_UP(match_len
, 8));
413 VLOG_DBG_RL(&rl
, "nx_match length %u, rounded up to a "
414 "multiple of 8, is longer than space in message (max "
415 "length %zu)", match_len
, b
->size
);
416 return ofp_mkerr(OFPET_BAD_REQUEST
, OFPBRC_BAD_LEN
);
419 cls_rule_init_catchall(rule
, priority
);
420 while ((header
= nx_entry_ok(p
, match_len
)) != 0) {
421 unsigned length
= NXM_LENGTH(header
);
422 const struct nxm_field
*f
;
425 f
= nxm_field_lookup(header
);
427 error
= NXM_BAD_TYPE
;
428 } else if (!nxm_prereqs_ok(f
, &rule
->flow
)) {
429 error
= NXM_BAD_PREREQ
;
430 } else if (f
->wildcard
&& !(rule
->wc
.wildcards
& f
->wildcard
)) {
431 error
= NXM_DUP_TYPE
;
433 /* 'hasmask' and 'length' are known to be correct at this point
434 * because they are included in 'header' and nxm_field_lookup()
435 * checked them already. */
436 rule
->wc
.wildcards
&= ~f
->wildcard
;
437 error
= parse_nxm_entry(rule
, f
, p
+ 4, p
+ 4 + length
/ 2);
440 VLOG_DBG_RL(&rl
, "bad nxm_entry with vendor=%"PRIu32
", "
441 "field=%"PRIu32
", hasmask=%"PRIu32
", type=%"PRIu32
" "
443 NXM_VENDOR(header
), NXM_FIELD(header
),
444 NXM_HASMASK(header
), NXM_TYPE(header
),
451 match_len
-= 4 + length
;
454 return match_len
? NXM_INVALID
: 0;
457 /* nx_put_match() and helpers.
459 * 'put' functions whose names end in 'w' add a wildcarded field.
460 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
461 * Other 'put' functions add exact-match fields.
465 nxm_put_header(struct ofpbuf
*b
, uint32_t header
)
467 ovs_be32 n_header
= htonl(header
);
468 ofpbuf_put(b
, &n_header
, sizeof n_header
);
/* Appends to 'b' the nxm 'header' followed by its 1-byte 'value'. */
static void
nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}
479 nxm_put_16(struct ofpbuf
*b
, uint32_t header
, ovs_be16 value
)
481 nxm_put_header(b
, header
);
482 ofpbuf_put(b
, &value
, sizeof value
);
486 nxm_put_16w(struct ofpbuf
*b
, uint32_t header
, ovs_be16 value
, ovs_be16 mask
)
488 nxm_put_header(b
, header
);
489 ofpbuf_put(b
, &value
, sizeof value
);
490 ofpbuf_put(b
, &mask
, sizeof mask
);
494 nxm_put_16m(struct ofpbuf
*b
, uint32_t header
, ovs_be16 value
, ovs_be16 mask
)
500 case CONSTANT_HTONS(UINT16_MAX
):
501 nxm_put_16(b
, header
, value
);
505 nxm_put_16w(b
, NXM_MAKE_WILD_HEADER(header
), value
, mask
);
511 nxm_put_32(struct ofpbuf
*b
, uint32_t header
, ovs_be32 value
)
513 nxm_put_header(b
, header
);
514 ofpbuf_put(b
, &value
, sizeof value
);
518 nxm_put_32w(struct ofpbuf
*b
, uint32_t header
, ovs_be32 value
, ovs_be32 mask
)
520 nxm_put_header(b
, header
);
521 ofpbuf_put(b
, &value
, sizeof value
);
522 ofpbuf_put(b
, &mask
, sizeof mask
);
526 nxm_put_32m(struct ofpbuf
*b
, uint32_t header
, ovs_be32 value
, ovs_be32 mask
)
532 case CONSTANT_HTONL(UINT32_MAX
):
533 nxm_put_32(b
, header
, value
);
537 nxm_put_32w(b
, NXM_MAKE_WILD_HEADER(header
), value
, mask
);
543 nxm_put_64(struct ofpbuf
*b
, uint32_t header
, ovs_be64 value
)
545 nxm_put_header(b
, header
);
546 ofpbuf_put(b
, &value
, sizeof value
);
550 nxm_put_64w(struct ofpbuf
*b
, uint32_t header
, ovs_be64 value
, ovs_be64 mask
)
552 nxm_put_header(b
, header
);
553 ofpbuf_put(b
, &value
, sizeof value
);
554 ofpbuf_put(b
, &mask
, sizeof mask
);
558 nxm_put_64m(struct ofpbuf
*b
, uint32_t header
, ovs_be64 value
, ovs_be64 mask
)
564 case CONSTANT_HTONLL(UINT64_MAX
):
565 nxm_put_64(b
, header
, value
);
569 nxm_put_64w(b
, NXM_MAKE_WILD_HEADER(header
), value
, mask
);
575 nxm_put_eth(struct ofpbuf
*b
, uint32_t header
,
576 const uint8_t value
[ETH_ADDR_LEN
])
578 nxm_put_header(b
, header
);
579 ofpbuf_put(b
, value
, ETH_ADDR_LEN
);
583 nxm_put_eth_dst(struct ofpbuf
*b
,
584 uint32_t wc
, const uint8_t value
[ETH_ADDR_LEN
])
586 switch (wc
& (FWW_DL_DST
| FWW_ETH_MCAST
)) {
587 case FWW_DL_DST
| FWW_ETH_MCAST
:
590 nxm_put_header(b
, NXM_OF_ETH_DST_W
);
591 ofpbuf_put(b
, value
, ETH_ADDR_LEN
);
592 ofpbuf_put(b
, eth_mcast_1
, ETH_ADDR_LEN
);
595 nxm_put_header(b
, NXM_OF_ETH_DST_W
);
596 ofpbuf_put(b
, value
, ETH_ADDR_LEN
);
597 ofpbuf_put(b
, eth_mcast_0
, ETH_ADDR_LEN
);
600 nxm_put_eth(b
, NXM_OF_ETH_DST
, value
);
605 /* Appends to 'b' the nx_match format that expresses 'cr' (except for
606 * 'cr->priority', because priority is not part of nx_match), plus enough
607 * zero bytes to pad the nx_match out to a multiple of 8.
609 * This function can cause 'b''s data to be reallocated.
611 * Returns the number of bytes appended to 'b', excluding padding.
613 * If 'cr' is a catch-all rule that matches every packet, then this function
614 * appends nothing to 'b' and returns 0. */
616 nx_put_match(struct ofpbuf
*b
, const struct cls_rule
*cr
)
618 const flow_wildcards_t wc
= cr
->wc
.wildcards
;
619 const struct flow
*flow
= &cr
->flow
;
620 const size_t start_len
= b
->size
;
625 if (!(wc
& FWW_IN_PORT
)) {
626 uint16_t in_port
= flow
->in_port
;
627 if (in_port
== ODPP_LOCAL
) {
628 in_port
= OFPP_LOCAL
;
630 nxm_put_16(b
, NXM_OF_IN_PORT
, htons(in_port
));
634 nxm_put_eth_dst(b
, wc
, flow
->dl_dst
);
635 if (!(wc
& FWW_DL_SRC
)) {
636 nxm_put_eth(b
, NXM_OF_ETH_SRC
, flow
->dl_src
);
638 if (!(wc
& FWW_DL_TYPE
)) {
639 nxm_put_16(b
, NXM_OF_ETH_TYPE
,
640 ofputil_dl_type_to_openflow(flow
->dl_type
));
644 nxm_put_16m(b
, NXM_OF_VLAN_TCI
, flow
->vlan_tci
, cr
->wc
.vlan_tci_mask
);
647 if (!(wc
& FWW_DL_TYPE
) && flow
->dl_type
== htons(ETH_TYPE_IP
)) {
649 if (!(wc
& FWW_NW_TOS
)) {
650 nxm_put_8(b
, NXM_OF_IP_TOS
, flow
->nw_tos
& 0xfc);
652 nxm_put_32m(b
, NXM_OF_IP_SRC
, flow
->nw_src
, cr
->wc
.nw_src_mask
);
653 nxm_put_32m(b
, NXM_OF_IP_DST
, flow
->nw_dst
, cr
->wc
.nw_dst_mask
);
655 if (!(wc
& FWW_NW_PROTO
)) {
656 nxm_put_8(b
, NXM_OF_IP_PROTO
, flow
->nw_proto
);
657 switch (flow
->nw_proto
) {
660 if (!(wc
& FWW_TP_SRC
)) {
661 nxm_put_16(b
, NXM_OF_TCP_SRC
, flow
->tp_src
);
663 if (!(wc
& FWW_TP_DST
)) {
664 nxm_put_16(b
, NXM_OF_TCP_DST
, flow
->tp_dst
);
670 if (!(wc
& FWW_TP_SRC
)) {
671 nxm_put_16(b
, NXM_OF_UDP_SRC
, flow
->tp_src
);
673 if (!(wc
& FWW_TP_DST
)) {
674 nxm_put_16(b
, NXM_OF_UDP_DST
, flow
->tp_dst
);
680 if (!(wc
& FWW_TP_SRC
)) {
681 nxm_put_8(b
, NXM_OF_ICMP_TYPE
, ntohs(flow
->tp_src
));
683 if (!(wc
& FWW_TP_DST
)) {
684 nxm_put_8(b
, NXM_OF_ICMP_CODE
, ntohs(flow
->tp_dst
));
689 } else if (!(wc
& FWW_DL_TYPE
) && flow
->dl_type
== htons(ETH_TYPE_ARP
)) {
691 if (!(wc
& FWW_NW_PROTO
)) {
692 nxm_put_16(b
, NXM_OF_ARP_OP
, htons(flow
->nw_proto
));
694 nxm_put_32m(b
, NXM_OF_ARP_SPA
, flow
->nw_src
, cr
->wc
.nw_src_mask
);
695 nxm_put_32m(b
, NXM_OF_ARP_TPA
, flow
->nw_dst
, cr
->wc
.nw_dst_mask
);
699 nxm_put_64m(b
, NXM_NX_TUN_ID
, flow
->tun_id
, cr
->wc
.tun_id_mask
);
702 for (i
= 0; i
< FLOW_N_REGS
; i
++) {
703 nxm_put_32m(b
, NXM_NX_REG(i
),
704 htonl(flow
->regs
[i
]), htonl(cr
->wc
.reg_masks
[i
]));
707 match_len
= b
->size
- start_len
;
708 ofpbuf_put_zeros(b
, ROUND_UP(match_len
, 8) - match_len
);
712 /* nx_match_to_string() and helpers. */
714 static void format_nxm_field_name(struct ds
*, uint32_t header
);
717 nx_match_to_string(const uint8_t *p
, unsigned int match_len
)
723 return xstrdup("<any>");
727 while ((header
= nx_entry_ok(p
, match_len
)) != 0) {
728 unsigned int length
= NXM_LENGTH(header
);
729 unsigned int value_len
= nxm_field_bytes(header
);
730 const uint8_t *value
= p
+ 4;
731 const uint8_t *mask
= value
+ value_len
;
735 ds_put_cstr(&s
, ", ");
738 format_nxm_field_name(&s
, header
);
739 ds_put_char(&s
, '(');
741 for (i
= 0; i
< value_len
; i
++) {
742 ds_put_format(&s
, "%02x", value
[i
]);
744 if (NXM_HASMASK(header
)) {
745 ds_put_char(&s
, '/');
746 for (i
= 0; i
< value_len
; i
++) {
747 ds_put_format(&s
, "%02x", mask
[i
]);
750 ds_put_char(&s
, ')');
753 match_len
-= 4 + length
;
758 ds_put_cstr(&s
, ", ");
761 ds_put_format(&s
, "<%u invalid bytes>", match_len
);
764 return ds_steal_cstr(&s
);
768 format_nxm_field_name(struct ds
*s
, uint32_t header
)
770 const struct nxm_field
*f
= nxm_field_lookup(header
);
772 ds_put_cstr(s
, f
->name
);
774 ds_put_format(s
, "%d:%d", NXM_VENDOR(header
), NXM_FIELD(header
));
779 parse_nxm_field_name(const char *name
, int name_len
)
781 const struct nxm_field
*f
;
783 /* Check whether it's a field name. */
784 for (f
= nxm_fields
; f
< &nxm_fields
[ARRAY_SIZE(nxm_fields
)]; f
++) {
785 if (!strncmp(f
->name
, name
, name_len
) && f
->name
[name_len
] == '\0') {
790 /* Check whether it's a 32-bit field header value as hex.
791 * (This isn't ordinarily useful except for testing error behavior.) */
793 uint32_t header
= hexits_value(name
, name_len
, NULL
);
794 if (header
!= UINT_MAX
) {
802 /* nx_match_from_string(). */
805 nx_match_from_string(const char *s
, struct ofpbuf
*b
)
807 const char *full_s
= s
;
808 const size_t start_len
= b
->size
;
811 if (!strcmp(s
, "<any>")) {
812 /* Ensure that 'b->data' isn't actually null. */
813 ofpbuf_prealloc_tailroom(b
, 1);
817 for (s
+= strspn(s
, ", "); *s
; s
+= strspn(s
, ", ")) {
824 name_len
= strcspn(s
, "(");
825 if (s
[name_len
] != '(') {
826 ovs_fatal(0, "%s: missing ( at end of nx_match", full_s
);
829 header
= parse_nxm_field_name(name
, name_len
);
831 ovs_fatal(0, "%s: unknown field `%.*s'", full_s
, name_len
, s
);
836 nxm_put_header(b
, header
);
837 s
= ofpbuf_put_hex(b
, s
, &n
);
838 if (n
!= nxm_field_bytes(header
)) {
839 ovs_fatal(0, "%.2s: hex digits expected", s
);
841 if (NXM_HASMASK(header
)) {
844 ovs_fatal(0, "%s: missing / in masked field %.*s",
845 full_s
, name_len
, name
);
847 s
= ofpbuf_put_hex(b
, s
+ 1, &n
);
848 if (n
!= nxm_field_bytes(header
)) {
849 ovs_fatal(0, "%.2s: hex digits expected", s
);
855 ovs_fatal(0, "%s: missing ) following field %.*s",
856 full_s
, name_len
, name
);
861 match_len
= b
->size
- start_len
;
862 ofpbuf_put_zeros(b
, ROUND_UP(match_len
, 8) - match_len
);
867 nxm_parse_field_bits(const char *s
, uint32_t *headerp
, int *ofsp
, int *n_bitsp
)
869 const char *full_s
= s
;
877 name_len
= strcspn(s
, "[");
878 if (s
[name_len
] != '[') {
879 ovs_fatal(0, "%s: missing [ looking for field name", full_s
);
882 header
= parse_nxm_field_name(name
, name_len
);
884 ovs_fatal(0, "%s: unknown field `%.*s'", full_s
, name_len
, s
);
886 width
= nxm_field_bits(header
);
889 if (sscanf(s
, "[%d..%d]", &start
, &end
) == 2) {
891 } else if (sscanf(s
, "[%d]", &start
) == 1) {
893 } else if (!strncmp(s
, "[]", 2)) {
897 ovs_fatal(0, "%s: syntax error expecting [] or [<bit>] or "
898 "[<start>..<end>]", full_s
);
900 s
= strchr(s
, ']') + 1;
903 ovs_fatal(0, "%s: starting bit %d is after ending bit %d",
905 } else if (start
>= width
) {
906 ovs_fatal(0, "%s: starting bit %d is not valid because field is only "
907 "%d bits wide", full_s
, start
, width
);
908 } else if (end
>= width
){
909 ovs_fatal(0, "%s: ending bit %d is not valid because field is only "
910 "%d bits wide", full_s
, end
, width
);
915 *n_bitsp
= end
- start
+ 1;
921 nxm_parse_reg_move(struct nx_action_reg_move
*move
, const char *s
)
923 const char *full_s
= s
;
925 int src_ofs
, dst_ofs
;
926 int src_n_bits
, dst_n_bits
;
928 s
= nxm_parse_field_bits(s
, &src
, &src_ofs
, &src_n_bits
);
929 if (strncmp(s
, "->", 2)) {
930 ovs_fatal(0, "%s: missing `->' following source", full_s
);
933 s
= nxm_parse_field_bits(s
, &dst
, &dst_ofs
, &dst_n_bits
);
935 ovs_fatal(0, "%s: trailing garbage following destination", full_s
);
938 if (src_n_bits
!= dst_n_bits
) {
939 ovs_fatal(0, "%s: source field is %d bits wide but destination is "
940 "%d bits wide", full_s
, src_n_bits
, dst_n_bits
);
943 move
->type
= htons(OFPAT_VENDOR
);
944 move
->len
= htons(sizeof *move
);
945 move
->vendor
= htonl(NX_VENDOR_ID
);
946 move
->subtype
= htons(NXAST_REG_MOVE
);
947 move
->n_bits
= htons(src_n_bits
);
948 move
->src_ofs
= htons(src_ofs
);
949 move
->dst_ofs
= htons(dst_ofs
);
950 move
->src
= htonl(src
);
951 move
->dst
= htonl(dst
);
955 nxm_parse_reg_load(struct nx_action_reg_load
*load
, const char *s
)
957 const char *full_s
= s
;
962 value
= strtoull(s
, (char **) &s
, 0);
963 if (strncmp(s
, "->", 2)) {
964 ovs_fatal(0, "%s: missing `->' following value", full_s
);
967 s
= nxm_parse_field_bits(s
, &dst
, &ofs
, &n_bits
);
969 ovs_fatal(0, "%s: trailing garbage following destination", full_s
);
972 if (n_bits
< 64 && (value
>> n_bits
) != 0) {
973 ovs_fatal(0, "%s: value %"PRIu64
" does not fit into %d bits",
974 full_s
, value
, n_bits
);
977 load
->type
= htons(OFPAT_VENDOR
);
978 load
->len
= htons(sizeof *load
);
979 load
->vendor
= htonl(NX_VENDOR_ID
);
980 load
->subtype
= htons(NXAST_REG_LOAD
);
981 load
->ofs_nbits
= nxm_encode_ofs_nbits(ofs
, n_bits
);
982 load
->dst
= htonl(dst
);
983 load
->value
= htonll(value
);
986 /* nxm_format_reg_move(), nxm_format_reg_load(). */
/* Appends to 's' the field name for 'header' plus a bit-range suffix:
 * "[]" for the whole field, "[N]" for a single bit, or "[start..end]" for a
 * contiguous range of 'n_bits' bits beginning at 'ofs'. */
static void
nxm_format_field_bits(struct ds *s, uint32_t header, int ofs, int n_bits)
{
    int end = ofs + n_bits - 1;

    format_nxm_field_name(s, header);
    if (ofs == 0 && n_bits == nxm_field_bits(header)) {
        ds_put_cstr(s, "[]");
    } else if (n_bits == 1) {
        ds_put_format(s, "[%d]", ofs);
    } else {
        ds_put_format(s, "[%d..%d]", ofs, end);
    }
}
1002 nxm_format_reg_move(const struct nx_action_reg_move
*move
, struct ds
*s
)
1004 int n_bits
= ntohs(move
->n_bits
);
1005 int src_ofs
= ntohs(move
->src_ofs
);
1006 int dst_ofs
= ntohs(move
->dst_ofs
);
1007 uint32_t src
= ntohl(move
->src
);
1008 uint32_t dst
= ntohl(move
->dst
);
1010 ds_put_format(s
, "move:");
1011 nxm_format_field_bits(s
, src
, src_ofs
, n_bits
);
1012 ds_put_cstr(s
, "->");
1013 nxm_format_field_bits(s
, dst
, dst_ofs
, n_bits
);
1017 nxm_format_reg_load(const struct nx_action_reg_load
*load
, struct ds
*s
)
1019 int ofs
= nxm_decode_ofs(load
->ofs_nbits
);
1020 int n_bits
= nxm_decode_n_bits(load
->ofs_nbits
);
1021 uint32_t dst
= ntohl(load
->dst
);
1022 uint64_t value
= ntohll(load
->value
);
1024 ds_put_format(s
, "load:%#"PRIx64
"->", value
);
1025 nxm_format_field_bits(s
, dst
, ofs
, n_bits
);
1028 /* nxm_check_reg_move(), nxm_check_reg_load(). */
1031 field_ok(const struct nxm_field
*f
, const struct flow
*flow
, int size
)
1033 return (f
&& !NXM_HASMASK(f
->header
)
1034 && nxm_prereqs_ok(f
, flow
) && size
<= nxm_field_bits(f
->header
));
1038 nxm_check_reg_move(const struct nx_action_reg_move
*action
,
1039 const struct flow
*flow
)
1041 const struct nxm_field
*src
;
1042 const struct nxm_field
*dst
;
1044 if (action
->n_bits
== htons(0)) {
1045 return BAD_ARGUMENT
;
1048 src
= nxm_field_lookup(ntohl(action
->src
));
1049 if (!field_ok(src
, flow
, ntohs(action
->src_ofs
) + ntohs(action
->n_bits
))) {
1050 return BAD_ARGUMENT
;
1053 dst
= nxm_field_lookup(ntohl(action
->dst
));
1054 if (!field_ok(dst
, flow
, ntohs(action
->dst_ofs
) + ntohs(action
->n_bits
))) {
1055 return BAD_ARGUMENT
;
1058 if (!dst
->writable
) {
1059 return BAD_ARGUMENT
;
1066 nxm_check_reg_load(const struct nx_action_reg_load
*action
,
1067 const struct flow
*flow
)
1069 const struct nxm_field
*dst
;
1072 ofs
= nxm_decode_ofs(action
->ofs_nbits
);
1073 n_bits
= nxm_decode_n_bits(action
->ofs_nbits
);
1074 dst
= nxm_field_lookup(ntohl(action
->dst
));
1075 if (!field_ok(dst
, flow
, ofs
+ n_bits
)) {
1076 return BAD_ARGUMENT
;
1079 /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
1081 if (n_bits
< 64 && ntohll(action
->value
) >> n_bits
) {
1082 return BAD_ARGUMENT
;
1085 if (!dst
->writable
) {
1086 return BAD_ARGUMENT
;
1092 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
1095 nxm_read_field(const struct nxm_field
*src
, const struct flow
*flow
)
1097 switch (src
->index
) {
1098 case NFI_NXM_OF_IN_PORT
:
1099 return flow
->in_port
== ODPP_LOCAL
? OFPP_LOCAL
: flow
->in_port
;
1101 case NFI_NXM_OF_ETH_DST
:
1102 return eth_addr_to_uint64(flow
->dl_dst
);
1104 case NFI_NXM_OF_ETH_SRC
:
1105 return eth_addr_to_uint64(flow
->dl_src
);
1107 case NFI_NXM_OF_ETH_TYPE
:
1108 return ntohs(ofputil_dl_type_to_openflow(flow
->dl_type
));
1110 case NFI_NXM_OF_VLAN_TCI
:
1111 return ntohs(flow
->vlan_tci
);
1113 case NFI_NXM_OF_IP_TOS
:
1114 return flow
->nw_tos
;
1116 case NFI_NXM_OF_IP_PROTO
:
1117 case NFI_NXM_OF_ARP_OP
:
1118 return flow
->nw_proto
;
1120 case NFI_NXM_OF_IP_SRC
:
1121 case NFI_NXM_OF_ARP_SPA
:
1122 return ntohl(flow
->nw_src
);
1124 case NFI_NXM_OF_IP_DST
:
1125 case NFI_NXM_OF_ARP_TPA
:
1126 return ntohl(flow
->nw_dst
);
1128 case NFI_NXM_OF_TCP_SRC
:
1129 case NFI_NXM_OF_UDP_SRC
:
1130 return ntohs(flow
->tp_src
);
1132 case NFI_NXM_OF_TCP_DST
:
1133 case NFI_NXM_OF_UDP_DST
:
1134 return ntohs(flow
->tp_dst
);
1136 case NFI_NXM_OF_ICMP_TYPE
:
1137 return ntohs(flow
->tp_src
) & 0xff;
1139 case NFI_NXM_OF_ICMP_CODE
:
1140 return ntohs(flow
->tp_dst
) & 0xff;
1142 case NFI_NXM_NX_TUN_ID
:
1143 return ntohll(flow
->tun_id
);
1145 #define NXM_READ_REGISTER(IDX) \
1146 case NFI_NXM_NX_REG##IDX: \
1147 return flow->regs[IDX]; \
1148 case NFI_NXM_NX_REG##IDX##_W: \
1151 NXM_READ_REGISTER(0);
1152 #if FLOW_N_REGS >= 2
1153 NXM_READ_REGISTER(1);
1155 #if FLOW_N_REGS >= 3
1156 NXM_READ_REGISTER(2);
1158 #if FLOW_N_REGS >= 4
1159 NXM_READ_REGISTER(3);
1165 case NFI_NXM_NX_TUN_ID_W
:
1166 case NFI_NXM_OF_ETH_DST_W
:
1167 case NFI_NXM_OF_VLAN_TCI_W
:
1168 case NFI_NXM_OF_IP_SRC_W
:
1169 case NFI_NXM_OF_IP_DST_W
:
1170 case NFI_NXM_OF_ARP_SPA_W
:
1171 case NFI_NXM_OF_ARP_TPA_W
:
1180 nxm_write_field(const struct nxm_field
*dst
, struct flow
*flow
,
1183 switch (dst
->index
) {
1184 case NFI_NXM_OF_VLAN_TCI
:
1185 flow
->vlan_tci
= htons(new_value
);
1188 case NFI_NXM_NX_TUN_ID
:
1189 flow
->tun_id
= htonll(new_value
);
1192 #define NXM_WRITE_REGISTER(IDX) \
1193 case NFI_NXM_NX_REG##IDX: \
1194 flow->regs[IDX] = new_value; \
1196 case NFI_NXM_NX_REG##IDX##_W: \
1199 NXM_WRITE_REGISTER(0);
1200 #if FLOW_N_REGS >= 2
1201 NXM_WRITE_REGISTER(1);
1203 #if FLOW_N_REGS >= 3
1204 NXM_WRITE_REGISTER(2);
1206 #if FLOW_N_REGS >= 4
1207 NXM_WRITE_REGISTER(3);
1213 case NFI_NXM_OF_IN_PORT
:
1214 case NFI_NXM_OF_ETH_DST
:
1215 case NFI_NXM_OF_ETH_SRC
:
1216 case NFI_NXM_OF_ETH_TYPE
:
1217 case NFI_NXM_OF_IP_TOS
:
1218 case NFI_NXM_OF_IP_PROTO
:
1219 case NFI_NXM_OF_ARP_OP
:
1220 case NFI_NXM_OF_IP_SRC
:
1221 case NFI_NXM_OF_ARP_SPA
:
1222 case NFI_NXM_OF_IP_DST
:
1223 case NFI_NXM_OF_ARP_TPA
:
1224 case NFI_NXM_OF_TCP_SRC
:
1225 case NFI_NXM_OF_UDP_SRC
:
1226 case NFI_NXM_OF_TCP_DST
:
1227 case NFI_NXM_OF_UDP_DST
:
1228 case NFI_NXM_OF_ICMP_TYPE
:
1229 case NFI_NXM_OF_ICMP_CODE
:
1230 case NFI_NXM_NX_TUN_ID_W
:
1231 case NFI_NXM_OF_ETH_DST_W
:
1232 case NFI_NXM_OF_VLAN_TCI_W
:
1233 case NFI_NXM_OF_IP_SRC_W
:
1234 case NFI_NXM_OF_IP_DST_W
:
1235 case NFI_NXM_OF_ARP_SPA_W
:
1236 case NFI_NXM_OF_ARP_TPA_W
:
1243 nxm_execute_reg_move(const struct nx_action_reg_move
*action
,
1247 int n_bits
= ntohs(action
->n_bits
);
1248 uint64_t mask
= n_bits
== 64 ? UINT64_MAX
: (UINT64_C(1) << n_bits
) - 1;
1250 /* Get the interesting bits of the source field. */
1251 const struct nxm_field
*src
= nxm_field_lookup(ntohl(action
->src
));
1252 int src_ofs
= ntohs(action
->src_ofs
);
1253 uint64_t src_data
= nxm_read_field(src
, flow
) & (mask
<< src_ofs
);
1255 /* Get the remaining bits of the destination field. */
1256 const struct nxm_field
*dst
= nxm_field_lookup(ntohl(action
->dst
));
1257 int dst_ofs
= ntohs(action
->dst_ofs
);
1258 uint64_t dst_data
= nxm_read_field(dst
, flow
) & ~(mask
<< dst_ofs
);
1260 /* Get the final value. */
1261 uint64_t new_data
= dst_data
| ((src_data
>> src_ofs
) << dst_ofs
);
1263 nxm_write_field(dst
, flow
, new_data
);
1267 nxm_execute_reg_load(const struct nx_action_reg_load
*action
,
1271 int n_bits
= nxm_decode_n_bits(action
->ofs_nbits
);
1272 uint64_t mask
= n_bits
== 64 ? UINT64_MAX
: (UINT64_C(1) << n_bits
) - 1;
1274 /* Get source data. */
1275 uint64_t src_data
= ntohll(action
->value
);
1277 /* Get remaining bits of the destination field. */
1278 const struct nxm_field
*dst
= nxm_field_lookup(ntohl(action
->dst
));
1279 int dst_ofs
= nxm_decode_ofs(action
->ofs_nbits
);
1280 uint64_t dst_data
= nxm_read_field(dst
, flow
) & ~(mask
<< dst_ofs
);
1282 /* Get the final value. */
1283 uint64_t new_data
= dst_data
| (src_data
<< dst_ofs
);
1285 nxm_write_field(dst
, flow
, new_data
);