/* Web-viewer residue converted to a comment: this file is lib/nx-match.c
 * from the Open vSwitch repository (ovs.git), as rendered by the
 * git.proxmox.com gitweb blob view (commit subject: "ovs-ofctl: Fix small
 * typo about nw_tos in man page."). */
1 /*
2 * Copyright (c) 2010, 2011 Nicira Networks.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "nx-match.h"
20
21 #include "classifier.h"
22 #include "dynamic-string.h"
23 #include "ofp-util.h"
24 #include "ofpbuf.h"
25 #include "openflow/nicira-ext.h"
26 #include "packets.h"
27 #include "unaligned.h"
28 #include "vlog.h"
29
30 VLOG_DEFINE_THIS_MODULE(nx_match);
31
32 /* Rate limit for nx_match parse errors. These always indicate a bug in the
33 * peer and so there's not much point in showing a lot of them. */
34 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
35
/* Internal shorthands for the OpenFlow error codes this module returns while
 * parsing and validating nx_match entries and NXM-based actions.  Each
 * expands to a (type, code) pair via the OFP_MKERR* macros. */
enum {
    NXM_INVALID = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_INVALID),
    NXM_BAD_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_TYPE),
    NXM_BAD_VALUE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_VALUE),
    NXM_BAD_MASK = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_MASK),
    NXM_BAD_PREREQ = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_PREREQ),
    NXM_DUP_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_DUP_TYPE),
    BAD_ARGUMENT = OFP_MKERR(OFPET_BAD_ACTION, OFPBAC_BAD_ARGUMENT)
};
45
/* For each NXM_* field, define NFI_NXM_* as consecutive integers starting from
 * zero.  The list of fields comes from nx-match.def, expanded through the
 * DEFINE_FIELD macro. */
enum nxm_field_index {
#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \
        NFI_NXM_##HEADER,
#include "nx-match.def"
    N_NXM_FIELDS                /* Total number of fields, not a real index. */
};
54
/* Compile-time metadata about one NXM field, one instance per entry in
 * nx-match.def.  Instances live in 'nxm_fields' and are indexed by header in
 * 'all_nxm_fields'. */
struct nxm_field {
    struct hmap_node hmap_node;  /* Node in 'all_nxm_fields', keyed by header. */
    enum nxm_field_index index;  /* NFI_* value. */
    uint32_t header;             /* NXM_* value. */
    flow_wildcards_t wildcard;   /* FWW_* bit, if exactly one. */
    ovs_be16 dl_type;            /* dl_type prerequisite, if nonzero. */
    uint8_t nw_proto;            /* nw_proto prerequisite, if nonzero. */
    const char *name;            /* "NXM_*" string. */
    bool writable;               /* Writable with NXAST_REG_{MOVE,LOAD}? */
};
65
/* All the known fields, generated from nx-match.def.  Each entry's hmap_node
 * starts out null; nxm_init() links them into 'all_nxm_fields'. */
static struct nxm_field nxm_fields[N_NXM_FIELDS] = {
#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE)     \
    { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \
        CONSTANT_HTONS(DL_TYPE), NW_PROTO, "NXM_" #HEADER, WRITABLE },
#include "nx-match.def"
};
73
/* Hash table of 'nxm_fields', keyed by NXM header; populated lazily by
 * nxm_init(). */
static struct hmap all_nxm_fields = HMAP_INITIALIZER(&all_nxm_fields);

/* Possible masks for NXM_OF_ETH_DST_W. */
static const uint8_t eth_all_0s[ETH_ADDR_LEN]    /* Match nothing. */
    = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
static const uint8_t eth_all_1s[ETH_ADDR_LEN]    /* Exact match. */
    = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
static const uint8_t eth_mcast_1[ETH_ADDR_LEN]   /* Multicast bit only. */
    = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
static const uint8_t eth_mcast_0[ETH_ADDR_LEN]   /* All but multicast bit. */
    = {0xfe, 0xff, 0xff, 0xff, 0xff, 0xff};
86
87 static void
88 nxm_init(void)
89 {
90 if (hmap_is_empty(&all_nxm_fields)) {
91 int i;
92
93 for (i = 0; i < N_NXM_FIELDS; i++) {
94 struct nxm_field *f = &nxm_fields[i];
95 hmap_insert(&all_nxm_fields, &f->hmap_node,
96 hash_int(f->header, 0));
97 }
98
99 /* Verify that the header values are unique (duplicate "case" values
100 * cause a compile error). */
101 switch (0) {
102 #define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \
103 case NXM_##HEADER: break;
104 #include "nx-match.def"
105 }
106 }
107 }
108
109 static const struct nxm_field *
110 nxm_field_lookup(uint32_t header)
111 {
112 struct nxm_field *f;
113
114 nxm_init();
115
116 HMAP_FOR_EACH_WITH_HASH (f, hmap_node, hash_int(header, 0),
117 &all_nxm_fields) {
118 if (f->header == header) {
119 return f;
120 }
121 }
122
123 return NULL;
124 }
125
/* Returns the width of the data for a field with the given 'header', in
 * bytes.  For a masked (hasmask=1) field, half of the wire payload is the
 * mask, so the value width is half the payload length. */
int
nxm_field_bytes(uint32_t header)
{
    unsigned int payload = NXM_LENGTH(header);

    if (NXM_HASMASK(header)) {
        payload /= 2;
    }
    return payload;
}
134
/* Returns the width of the data for a field with the given 'header', in
 * bits. */
int
nxm_field_bits(uint32_t header)
{
    return 8 * nxm_field_bytes(header);
}
142 \f
143 /* nx_pull_match() and helpers. */
144
145 static int
146 parse_nx_reg(const struct nxm_field *f,
147 struct flow *flow, struct flow_wildcards *wc,
148 const void *value, const void *maskp)
149 {
150 int idx = NXM_NX_REG_IDX(f->header);
151 if (wc->reg_masks[idx]) {
152 return NXM_DUP_TYPE;
153 } else {
154 flow_wildcards_set_reg_mask(wc, idx,
155 (NXM_HASMASK(f->header)
156 ? ntohl(get_unaligned_be32(maskp))
157 : UINT32_MAX));
158 flow->regs[idx] = ntohl(get_unaligned_be32(value));
159 flow->regs[idx] &= wc->reg_masks[idx];
160 return 0;
161 }
162 }
163
164 static int
165 parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f,
166 const void *value, const void *mask)
167 {
168 struct flow_wildcards *wc = &rule->wc;
169 struct flow *flow = &rule->flow;
170
171 switch (f->index) {
172 /* Metadata. */
173 case NFI_NXM_OF_IN_PORT:
174 flow->in_port = ntohs(get_unaligned_be16(value));
175 if (flow->in_port == OFPP_LOCAL) {
176 flow->in_port = ODPP_LOCAL;
177 }
178 return 0;
179
180 /* Ethernet header. */
181 case NFI_NXM_OF_ETH_DST:
182 if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
183 != (FWW_DL_DST | FWW_ETH_MCAST)) {
184 return NXM_DUP_TYPE;
185 } else {
186 wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
187 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
188 return 0;
189 }
190 case NFI_NXM_OF_ETH_DST_W:
191 if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
192 != (FWW_DL_DST | FWW_ETH_MCAST)) {
193 return NXM_DUP_TYPE;
194 } else if (eth_addr_equals(mask, eth_mcast_1)) {
195 wc->wildcards &= ~FWW_ETH_MCAST;
196 flow->dl_dst[0] = *(uint8_t *) value & 0x01;
197 } else if (eth_addr_equals(mask, eth_mcast_0)) {
198 wc->wildcards &= ~FWW_DL_DST;
199 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
200 flow->dl_dst[0] &= 0xfe;
201 } else if (eth_addr_equals(mask, eth_all_0s)) {
202 return 0;
203 } else if (eth_addr_equals(mask, eth_all_1s)) {
204 wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
205 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
206 return 0;
207 } else {
208 return NXM_BAD_MASK;
209 }
210 case NFI_NXM_OF_ETH_SRC:
211 memcpy(flow->dl_src, value, ETH_ADDR_LEN);
212 return 0;
213 case NFI_NXM_OF_ETH_TYPE:
214 flow->dl_type = ofputil_dl_type_from_openflow(get_unaligned_be16(value));
215 return 0;
216
217 /* 802.1Q header. */
218 case NFI_NXM_OF_VLAN_TCI:
219 if (wc->vlan_tci_mask) {
220 return NXM_DUP_TYPE;
221 } else {
222 cls_rule_set_dl_tci(rule, get_unaligned_be16(value));
223 return 0;
224 }
225 case NFI_NXM_OF_VLAN_TCI_W:
226 if (wc->vlan_tci_mask) {
227 return NXM_DUP_TYPE;
228 } else {
229 cls_rule_set_dl_tci_masked(rule, get_unaligned_be16(value),
230 get_unaligned_be16(mask));
231 return 0;
232 }
233
234 /* IP header. */
235 case NFI_NXM_OF_IP_TOS:
236 if (*(uint8_t *) value & 0x03) {
237 return NXM_BAD_VALUE;
238 } else {
239 flow->nw_tos = *(uint8_t *) value;
240 return 0;
241 }
242 case NFI_NXM_OF_IP_PROTO:
243 flow->nw_proto = *(uint8_t *) value;
244 return 0;
245
246 /* IP addresses in IP and ARP headers. */
247 case NFI_NXM_OF_IP_SRC:
248 case NFI_NXM_OF_ARP_SPA:
249 if (wc->nw_src_mask) {
250 return NXM_DUP_TYPE;
251 } else {
252 cls_rule_set_nw_src(rule, get_unaligned_be32(value));
253 return 0;
254 }
255 case NFI_NXM_OF_IP_SRC_W:
256 case NFI_NXM_OF_ARP_SPA_W:
257 if (wc->nw_src_mask) {
258 return NXM_DUP_TYPE;
259 } else {
260 ovs_be32 ip = get_unaligned_be32(value);
261 ovs_be32 netmask = get_unaligned_be32(mask);
262 if (!cls_rule_set_nw_src_masked(rule, ip, netmask)) {
263 return NXM_BAD_MASK;
264 }
265 return 0;
266 }
267 case NFI_NXM_OF_IP_DST:
268 case NFI_NXM_OF_ARP_TPA:
269 if (wc->nw_dst_mask) {
270 return NXM_DUP_TYPE;
271 } else {
272 cls_rule_set_nw_dst(rule, get_unaligned_be32(value));
273 return 0;
274 }
275 case NFI_NXM_OF_IP_DST_W:
276 case NFI_NXM_OF_ARP_TPA_W:
277 if (wc->nw_dst_mask) {
278 return NXM_DUP_TYPE;
279 } else {
280 ovs_be32 ip = get_unaligned_be32(value);
281 ovs_be32 netmask = get_unaligned_be32(mask);
282 if (!cls_rule_set_nw_dst_masked(rule, ip, netmask)) {
283 return NXM_BAD_MASK;
284 }
285 return 0;
286 }
287
288 /* TCP header. */
289 case NFI_NXM_OF_TCP_SRC:
290 flow->tp_src = get_unaligned_be16(value);
291 return 0;
292 case NFI_NXM_OF_TCP_DST:
293 flow->tp_dst = get_unaligned_be16(value);
294 return 0;
295
296 /* UDP header. */
297 case NFI_NXM_OF_UDP_SRC:
298 flow->tp_src = get_unaligned_be16(value);
299 return 0;
300 case NFI_NXM_OF_UDP_DST:
301 flow->tp_dst = get_unaligned_be16(value);
302 return 0;
303
304 /* ICMP header. */
305 case NFI_NXM_OF_ICMP_TYPE:
306 flow->tp_src = htons(*(uint8_t *) value);
307 return 0;
308 case NFI_NXM_OF_ICMP_CODE:
309 flow->tp_dst = htons(*(uint8_t *) value);
310 return 0;
311
312 /* ARP header. */
313 case NFI_NXM_OF_ARP_OP:
314 if (ntohs(get_unaligned_be16(value)) > 255) {
315 return NXM_BAD_VALUE;
316 } else {
317 flow->nw_proto = ntohs(get_unaligned_be16(value));
318 return 0;
319 }
320
321 /* Tunnel ID. */
322 case NFI_NXM_NX_TUN_ID:
323 if (wc->tun_id_mask) {
324 return NXM_DUP_TYPE;
325 } else {
326 cls_rule_set_tun_id(rule, get_unaligned_be64(value));
327 return 0;
328 }
329 case NFI_NXM_NX_TUN_ID_W:
330 if (wc->tun_id_mask) {
331 return NXM_DUP_TYPE;
332 } else {
333 ovs_be64 tun_id = get_unaligned_be64(value);
334 ovs_be64 tun_mask = get_unaligned_be64(mask);
335 cls_rule_set_tun_id_masked(rule, tun_id, tun_mask);
336 return 0;
337 }
338
339 /* Registers. */
340 case NFI_NXM_NX_REG0:
341 case NFI_NXM_NX_REG0_W:
342 #if FLOW_N_REGS >= 2
343 case NFI_NXM_NX_REG1:
344 case NFI_NXM_NX_REG1_W:
345 #endif
346 #if FLOW_N_REGS >= 3
347 case NFI_NXM_NX_REG2:
348 case NFI_NXM_NX_REG2_W:
349 #endif
350 #if FLOW_N_REGS >= 4
351 case NFI_NXM_NX_REG3:
352 case NFI_NXM_NX_REG3_W:
353 #endif
354 #if FLOW_N_REGS > 4
355 #error
356 #endif
357 return parse_nx_reg(f, flow, wc, value, mask);
358
359 case N_NXM_FIELDS:
360 NOT_REACHED();
361 }
362 NOT_REACHED();
363 }
364
365 static bool
366 nxm_prereqs_ok(const struct nxm_field *field, const struct flow *flow)
367 {
368 return (!field->dl_type
369 || (field->dl_type == flow->dl_type
370 && (!field->nw_proto || field->nw_proto == flow->nw_proto)));
371 }
372
/* Checks the nxm_entry at 'p', given that 'match_len' bytes of nx_match
 * remain.  Returns the entry's header in host byte order if the entry is
 * complete and well formed, or 0 if the remaining bytes are empty, truncated,
 * or carry a zero payload length (logging the problem in the latter cases). */
static uint32_t
nx_entry_ok(const void *p, unsigned int match_len)
{
    unsigned int payload_len;
    ovs_be32 header_be;
    uint32_t header;

    if (match_len < 4) {
        /* Not even a complete 4-byte nxm_header remains. */
        if (match_len) {
            VLOG_DBG_RL(&rl, "nx_match ends with partial nxm_header");
        }
        return 0;
    }
    /* memcpy() because 'p' may be unaligned. */
    memcpy(&header_be, p, 4);
    header = ntohl(header_be);

    payload_len = NXM_LENGTH(header);
    if (!payload_len) {
        VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
                    "length 0", header);
        return 0;
    }
    if (match_len < payload_len + 4) {
        VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
                    "%u bytes left in nx_match", payload_len + 4, match_len);
        return 0;
    }

    return header;
}
403
/* Parses the nx_match at the start of 'b', consisting of 'match_len'
 * significant bytes (padded to a multiple of 8 on the wire), into 'rule',
 * which is first initialized as a catch-all with the given 'priority'.
 * Pulls the parsed bytes (including padding) off of 'b'.  Returns 0 on
 * success, otherwise an OpenFlow error code. */
int
nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority,
              struct cls_rule *rule)
{
    uint32_t header;
    uint8_t *p;

    /* nx_match is padded out to an 8-byte boundary on the wire. */
    p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
    if (!p) {
        VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
                    "multiple of 8, is longer than space in message (max "
                    "length %zu)", match_len, b->size);
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
    }

    cls_rule_init_catchall(rule, priority);
    while ((header = nx_entry_ok(p, match_len)) != 0) {
        unsigned length = NXM_LENGTH(header);
        const struct nxm_field *f;
        int error;

        f = nxm_field_lookup(header);
        if (!f) {
            error = NXM_BAD_TYPE;
        } else if (!nxm_prereqs_ok(f, &rule->flow)) {
            error = NXM_BAD_PREREQ;
        } else if (f->wildcard && !(rule->wc.wildcards & f->wildcard)) {
            /* Wildcard bit already cleared: the field appeared twice. */
            error = NXM_DUP_TYPE;
        } else {
            /* 'hasmask' and 'length' are known to be correct at this point
             * because they are included in 'header' and nxm_field_lookup()
             * checked them already. */
            rule->wc.wildcards &= ~f->wildcard;
            /* Value begins after the 4-byte header; for masked fields, the
             * mask occupies the second half of the payload. */
            error = parse_nxm_entry(rule, f, p + 4, p + 4 + length / 2);
        }
        if (error) {
            VLOG_DBG_RL(&rl, "bad nxm_entry with vendor=%"PRIu32", "
                        "field=%"PRIu32", hasmask=%"PRIu32", type=%"PRIu32" "
                        "(error %x)",
                        NXM_VENDOR(header), NXM_FIELD(header),
                        NXM_HASMASK(header), NXM_TYPE(header),
                        error);
            return error;
        }

        p += 4 + length;
        match_len -= 4 + length;
    }

    /* Any leftover bytes too short to form an nxm_entry are an error. */
    return match_len ? NXM_INVALID : 0;
}
456 \f
457 /* nx_put_match() and helpers.
458 *
459 * 'put' functions whose names end in 'w' add a wildcarded field.
460 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
461 * Other 'put' functions add exact-match fields.
462 */
463
464 static void
465 nxm_put_header(struct ofpbuf *b, uint32_t header)
466 {
467 ovs_be32 n_header = htonl(header);
468 ofpbuf_put(b, &n_header, sizeof n_header);
469 }
470
/* Appends to 'b' the header 'header' followed by the 1-byte 'value'. */
static void
nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

/* Appends to 'b' the header 'header' followed by the 2-byte 'value' (already
 * in network byte order). */
static void
nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

/* Appends to 'b' the header 'header' followed by the 2-byte 'value' and
 * 2-byte 'mask' (both already in network byte order). */
static void
nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
    ofpbuf_put(b, &mask, sizeof mask);
}
492
493 static void
494 nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
495 {
496 switch (mask) {
497 case 0:
498 break;
499
500 case CONSTANT_HTONS(UINT16_MAX):
501 nxm_put_16(b, header, value);
502 break;
503
504 default:
505 nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
506 break;
507 }
508 }
509
/* Appends to 'b' the header 'header' followed by the 4-byte 'value' (already
 * in network byte order). */
static void
nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

/* Appends to 'b' the header 'header' followed by the 4-byte 'value' and
 * 4-byte 'mask' (both already in network byte order). */
static void
nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
    ofpbuf_put(b, &mask, sizeof mask);
}
524
525 static void
526 nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
527 {
528 switch (mask) {
529 case 0:
530 break;
531
532 case CONSTANT_HTONL(UINT32_MAX):
533 nxm_put_32(b, header, value);
534 break;
535
536 default:
537 nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
538 break;
539 }
540 }
541
/* Appends to 'b' the header 'header' followed by the 8-byte 'value' (already
 * in network byte order). */
static void
nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

/* Appends to 'b' the header 'header' followed by the 8-byte 'value' and
 * 8-byte 'mask' (both already in network byte order). */
static void
nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
    ofpbuf_put(b, &mask, sizeof mask);
}
556
557 static void
558 nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
559 {
560 switch (mask) {
561 case 0:
562 break;
563
564 case CONSTANT_HTONLL(UINT64_MAX):
565 nxm_put_64(b, header, value);
566 break;
567
568 default:
569 nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
570 break;
571 }
572 }
573
/* Appends to 'b' the header 'header' followed by the 6-byte Ethernet address
 * 'value'. */
static void
nxm_put_eth(struct ofpbuf *b, uint32_t header,
            const uint8_t value[ETH_ADDR_LEN])
{
    nxm_put_header(b, header);
    ofpbuf_put(b, value, ETH_ADDR_LEN);
}
581
582 static void
583 nxm_put_eth_dst(struct ofpbuf *b,
584 uint32_t wc, const uint8_t value[ETH_ADDR_LEN])
585 {
586 switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) {
587 case FWW_DL_DST | FWW_ETH_MCAST:
588 break;
589 case FWW_DL_DST:
590 nxm_put_header(b, NXM_OF_ETH_DST_W);
591 ofpbuf_put(b, value, ETH_ADDR_LEN);
592 ofpbuf_put(b, eth_mcast_1, ETH_ADDR_LEN);
593 break;
594 case FWW_ETH_MCAST:
595 nxm_put_header(b, NXM_OF_ETH_DST_W);
596 ofpbuf_put(b, value, ETH_ADDR_LEN);
597 ofpbuf_put(b, eth_mcast_0, ETH_ADDR_LEN);
598 break;
599 case 0:
600 nxm_put_eth(b, NXM_OF_ETH_DST, value);
601 break;
602 }
603 }
604
/* Appends to 'b' the nx_match format that expresses 'cr' (except for
 * 'cr->priority', because priority is not part of nx_match), plus enough
 * zero bytes to pad the nx_match out to a multiple of 8.
 *
 * This function can cause 'b''s data to be reallocated.
 *
 * Returns the number of bytes appended to 'b', excluding padding.
 *
 * If 'cr' is a catch-all rule that matches every packet, then this function
 * appends nothing to 'b' and returns 0. */
int
nx_put_match(struct ofpbuf *b, const struct cls_rule *cr)
{
    const flow_wildcards_t wc = cr->wc.wildcards;
    const struct flow *flow = &cr->flow;
    const size_t start_len = b->size;
    int match_len;
    int i;

    /* Metadata. */
    if (!(wc & FWW_IN_PORT)) {
        uint16_t in_port = flow->in_port;
        if (in_port == ODPP_LOCAL) {
            /* The datapath and OpenFlow use different local port numbers. */
            in_port = OFPP_LOCAL;
        }
        nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port));
    }

    /* Ethernet. */
    nxm_put_eth_dst(b, wc, flow->dl_dst);
    if (!(wc & FWW_DL_SRC)) {
        nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src);
    }
    if (!(wc & FWW_DL_TYPE)) {
        nxm_put_16(b, NXM_OF_ETH_TYPE,
                   ofputil_dl_type_to_openflow(flow->dl_type));
    }

    /* 802.1Q. */
    nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask);

    /* L3. */
    if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) {
        /* IP. */
        if (!(wc & FWW_NW_TOS)) {
            /* The low two bits are not carried by NXM_OF_IP_TOS (the parse
             * side rejects them), so mask them out. */
            nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc);
        }
        nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask);
        nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask);

        if (!(wc & FWW_NW_PROTO)) {
            nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
            switch (flow->nw_proto) {
                /* TCP. */
            case IP_TYPE_TCP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src);
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst);
                }
                break;

                /* UDP. */
            case IP_TYPE_UDP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src);
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst);
                }
                break;

                /* ICMP.  Type and code are stored in the transport port
                 * fields in network byte order. */
            case IP_TYPE_ICMP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_8(b, NXM_OF_ICMP_TYPE, ntohs(flow->tp_src));
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_8(b, NXM_OF_ICMP_CODE, ntohs(flow->tp_dst));
                }
                break;
            }
        }
    } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
        /* ARP.  The opcode is stored in 'nw_proto'. */
        if (!(wc & FWW_NW_PROTO)) {
            nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto));
        }
        nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask);
        nxm_put_32m(b, NXM_OF_ARP_TPA, flow->nw_dst, cr->wc.nw_dst_mask);
    }

    /* Tunnel ID. */
    nxm_put_64m(b, NXM_NX_TUN_ID, flow->tun_id, cr->wc.tun_id_mask);

    /* Registers. */
    for (i = 0; i < FLOW_N_REGS; i++) {
        nxm_put_32m(b, NXM_NX_REG(i),
                    htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i]));
    }

    /* Pad out to a multiple of 8 bytes, as the wire format requires. */
    match_len = b->size - start_len;
    ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
    return match_len;
}
711 \f
712 /* nx_match_to_string() and helpers. */
713
714 static void format_nxm_field_name(struct ds *, uint32_t header);
715
/* Returns a human-readable string for the 'match_len' bytes of nx_match at
 * 'p', in the form "FIELD(hexvalue), FIELD(hexvalue/hexmask), ...", or
 * "<any>" if 'match_len' is 0.  The caller owns the returned string
 * (obtained from ds_steal_cstr()) and should free it. */
char *
nx_match_to_string(const uint8_t *p, unsigned int match_len)
{
    uint32_t header;
    struct ds s;

    if (!match_len) {
        return xstrdup("<any>");
    }

    ds_init(&s);
    while ((header = nx_entry_ok(p, match_len)) != 0) {
        unsigned int length = NXM_LENGTH(header);
        unsigned int value_len = nxm_field_bytes(header);
        const uint8_t *value = p + 4;
        /* For masked fields the mask immediately follows the value. */
        const uint8_t *mask = value + value_len;
        unsigned int i;

        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        format_nxm_field_name(&s, header);
        ds_put_char(&s, '(');

        for (i = 0; i < value_len; i++) {
            ds_put_format(&s, "%02x", value[i]);
        }
        if (NXM_HASMASK(header)) {
            ds_put_char(&s, '/');
            for (i = 0; i < value_len; i++) {
                ds_put_format(&s, "%02x", mask[i]);
            }
        }
        ds_put_char(&s, ')');

        p += 4 + length;
        match_len -= 4 + length;
    }

    if (match_len) {
        /* Trailing bytes that do not form a complete nxm_entry. */
        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        ds_put_format(&s, "<%u invalid bytes>", match_len);
    }

    return ds_steal_cstr(&s);
}
766
767 static void
768 format_nxm_field_name(struct ds *s, uint32_t header)
769 {
770 const struct nxm_field *f = nxm_field_lookup(header);
771 if (f) {
772 ds_put_cstr(s, f->name);
773 } else {
774 ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
775 }
776 }
777
778 static uint32_t
779 parse_nxm_field_name(const char *name, int name_len)
780 {
781 const struct nxm_field *f;
782
783 /* Check whether it's a field name. */
784 for (f = nxm_fields; f < &nxm_fields[ARRAY_SIZE(nxm_fields)]; f++) {
785 if (!strncmp(f->name, name, name_len) && f->name[name_len] == '\0') {
786 return f->header;
787 }
788 }
789
790 /* Check whether it's a 32-bit field header value as hex.
791 * (This isn't ordinarily useful except for testing error behavior.) */
792 if (name_len == 8) {
793 uint32_t header = hexits_value(name, name_len, NULL);
794 if (header != UINT_MAX) {
795 return header;
796 }
797 }
798
799 return 0;
800 }
801 \f
802 /* nx_match_from_string(). */
803
/* Parses the string representation 's' of an nx_match (the format produced
 * by nx_match_to_string()) and appends the binary encoding to 'b', padded
 * with zeros to a multiple of 8 bytes.  Returns the match length, excluding
 * padding.  Reports a fatal error (via ovs_fatal()) on any syntax problem. */
int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    const char *full_s = s;
    const size_t start_len = b->size;
    int match_len;

    if (!strcmp(s, "<any>")) {
        /* Ensure that 'b->data' isn't actually null. */
        ofpbuf_prealloc_tailroom(b, 1);
        return 0;
    }

    /* Each iteration consumes one "NAME(hexvalue[/hexmask])" element,
     * skipping commas and spaces between elements. */
    for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
        const char *name;
        uint32_t header;
        int name_len;
        size_t n;

        name = s;
        name_len = strcspn(s, "(");
        if (s[name_len] != '(') {
            ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
        }

        header = parse_nxm_field_name(name, name_len);
        if (!header) {
            ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
        }

        s += name_len + 1;

        /* Emit the header, then the hex-encoded value (and mask, for
         * hasmask=1 headers), checking that each has the field's width. */
        nxm_put_header(b, header);
        s = ofpbuf_put_hex(b, s, &n);
        if (n != nxm_field_bytes(header)) {
            ovs_fatal(0, "%.2s: hex digits expected", s);
        }
        if (NXM_HASMASK(header)) {
            s += strspn(s, " ");
            if (*s != '/') {
                ovs_fatal(0, "%s: missing / in masked field %.*s",
                          full_s, name_len, name);
            }
            s = ofpbuf_put_hex(b, s + 1, &n);
            if (n != nxm_field_bytes(header)) {
                ovs_fatal(0, "%.2s: hex digits expected", s);
            }
        }

        s += strspn(s, " ");
        if (*s != ')') {
            ovs_fatal(0, "%s: missing ) following field %.*s",
                      full_s, name_len, name);
        }
        s++;
    }

    match_len = b->size - start_len;
    ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
    return match_len;
}
865 \f
/* Parses a field-and-bit-range specification at the start of 's', in one of
 * the forms "NAME[]" (the whole field), "NAME[bit]" (a single bit), or
 * "NAME[start..end]" (an inclusive range).  Stores the field's NXM header in
 * '*headerp', the first bit in '*ofsp', and the number of bits in '*n_bitsp'.
 * Returns a pointer to the character just past the ']'.  Reports a fatal
 * error on any syntax problem or out-of-range bit index. */
const char *
nxm_parse_field_bits(const char *s, uint32_t *headerp, int *ofsp, int *n_bitsp)
{
    const char *full_s = s;
    const char *name;
    uint32_t header;
    int start, end;
    int name_len;
    int width;

    name = s;
    name_len = strcspn(s, "[");
    if (s[name_len] != '[') {
        ovs_fatal(0, "%s: missing [ looking for field name", full_s);
    }

    header = parse_nxm_field_name(name, name_len);
    if (!header) {
        ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
    }
    width = nxm_field_bits(header);

    s += name_len;
    if (sscanf(s, "[%d..%d]", &start, &end) == 2) {
        /* Nothing to do. */
    } else if (sscanf(s, "[%d]", &start) == 1) {
        end = start;
    } else if (!strncmp(s, "[]", 2)) {
        /* "[]" selects the entire field. */
        start = 0;
        end = width - 1;
    } else {
        ovs_fatal(0, "%s: syntax error expecting [] or [<bit>] or "
                  "[<start>..<end>]", full_s);
    }
    s = strchr(s, ']') + 1;

    /* Validate the range against the field's width. */
    if (start > end) {
        ovs_fatal(0, "%s: starting bit %d is after ending bit %d",
                  full_s, start, end);
    } else if (start >= width) {
        ovs_fatal(0, "%s: starting bit %d is not valid because field is only "
                  "%d bits wide", full_s, start, width);
    } else if (end >= width){
        ovs_fatal(0, "%s: ending bit %d is not valid because field is only "
                  "%d bits wide", full_s, end, width);
    }

    *headerp = header;
    *ofsp = start;
    *n_bitsp = end - start + 1;

    return s;
}
919
920 void
921 nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s)
922 {
923 const char *full_s = s;
924 uint32_t src, dst;
925 int src_ofs, dst_ofs;
926 int src_n_bits, dst_n_bits;
927
928 s = nxm_parse_field_bits(s, &src, &src_ofs, &src_n_bits);
929 if (strncmp(s, "->", 2)) {
930 ovs_fatal(0, "%s: missing `->' following source", full_s);
931 }
932 s += 2;
933 s = nxm_parse_field_bits(s, &dst, &dst_ofs, &dst_n_bits);
934 if (*s != '\0') {
935 ovs_fatal(0, "%s: trailing garbage following destination", full_s);
936 }
937
938 if (src_n_bits != dst_n_bits) {
939 ovs_fatal(0, "%s: source field is %d bits wide but destination is "
940 "%d bits wide", full_s, src_n_bits, dst_n_bits);
941 }
942
943 move->type = htons(OFPAT_VENDOR);
944 move->len = htons(sizeof *move);
945 move->vendor = htonl(NX_VENDOR_ID);
946 move->subtype = htons(NXAST_REG_MOVE);
947 move->n_bits = htons(src_n_bits);
948 move->src_ofs = htons(src_ofs);
949 move->dst_ofs = htons(dst_ofs);
950 move->src = htonl(src);
951 move->dst = htonl(dst);
952 }
953
954 void
955 nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s)
956 {
957 const char *full_s = s;
958 uint32_t dst;
959 int ofs, n_bits;
960 uint64_t value;
961
962 value = strtoull(s, (char **) &s, 0);
963 if (strncmp(s, "->", 2)) {
964 ovs_fatal(0, "%s: missing `->' following value", full_s);
965 }
966 s += 2;
967 s = nxm_parse_field_bits(s, &dst, &ofs, &n_bits);
968 if (*s != '\0') {
969 ovs_fatal(0, "%s: trailing garbage following destination", full_s);
970 }
971
972 if (n_bits < 64 && (value >> n_bits) != 0) {
973 ovs_fatal(0, "%s: value %"PRIu64" does not fit into %d bits",
974 full_s, value, n_bits);
975 }
976
977 load->type = htons(OFPAT_VENDOR);
978 load->len = htons(sizeof *load);
979 load->vendor = htonl(NX_VENDOR_ID);
980 load->subtype = htons(NXAST_REG_LOAD);
981 load->ofs_nbits = nxm_encode_ofs_nbits(ofs, n_bits);
982 load->dst = htonl(dst);
983 load->value = htonll(value);
984 }
985 \f
986 /* nxm_format_reg_move(), nxm_format_reg_load(). */
987
/* Appends to 's' the field name for 'header' followed by the bit-range
 * suffix: "[]" for the whole field, "[bit]" for one bit, or "[start..end]"
 * otherwise. */
void
nxm_format_field_bits(struct ds *s, uint32_t header, int ofs, int n_bits)
{
    format_nxm_field_name(s, header);
    if (n_bits == nxm_field_bits(header) && ofs == 0) {
        ds_put_cstr(s, "[]");
    } else if (n_bits != 1) {
        ds_put_format(s, "[%d..%d]", ofs, ofs + n_bits - 1);
    } else {
        ds_put_format(s, "[%d]", ofs);
    }
}
1000
1001 void
1002 nxm_format_reg_move(const struct nx_action_reg_move *move, struct ds *s)
1003 {
1004 int n_bits = ntohs(move->n_bits);
1005 int src_ofs = ntohs(move->src_ofs);
1006 int dst_ofs = ntohs(move->dst_ofs);
1007 uint32_t src = ntohl(move->src);
1008 uint32_t dst = ntohl(move->dst);
1009
1010 ds_put_format(s, "move:");
1011 nxm_format_field_bits(s, src, src_ofs, n_bits);
1012 ds_put_cstr(s, "->");
1013 nxm_format_field_bits(s, dst, dst_ofs, n_bits);
1014 }
1015
1016 void
1017 nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s)
1018 {
1019 int ofs = nxm_decode_ofs(load->ofs_nbits);
1020 int n_bits = nxm_decode_n_bits(load->ofs_nbits);
1021 uint32_t dst = ntohl(load->dst);
1022 uint64_t value = ntohll(load->value);
1023
1024 ds_put_format(s, "load:%#"PRIx64"->", value);
1025 nxm_format_field_bits(s, dst, ofs, n_bits);
1026 }
1027 \f
1028 /* nxm_check_reg_move(), nxm_check_reg_load(). */
1029
1030 static bool
1031 field_ok(const struct nxm_field *f, const struct flow *flow, int size)
1032 {
1033 return (f && !NXM_HASMASK(f->header)
1034 && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header));
1035 }
1036
1037 int
1038 nxm_check_reg_move(const struct nx_action_reg_move *action,
1039 const struct flow *flow)
1040 {
1041 const struct nxm_field *src;
1042 const struct nxm_field *dst;
1043
1044 if (action->n_bits == htons(0)) {
1045 return BAD_ARGUMENT;
1046 }
1047
1048 src = nxm_field_lookup(ntohl(action->src));
1049 if (!field_ok(src, flow, ntohs(action->src_ofs) + ntohs(action->n_bits))) {
1050 return BAD_ARGUMENT;
1051 }
1052
1053 dst = nxm_field_lookup(ntohl(action->dst));
1054 if (!field_ok(dst, flow, ntohs(action->dst_ofs) + ntohs(action->n_bits))) {
1055 return BAD_ARGUMENT;
1056 }
1057
1058 if (!dst->writable) {
1059 return BAD_ARGUMENT;
1060 }
1061
1062 return 0;
1063 }
1064
1065 int
1066 nxm_check_reg_load(const struct nx_action_reg_load *action,
1067 const struct flow *flow)
1068 {
1069 const struct nxm_field *dst;
1070 int ofs, n_bits;
1071
1072 ofs = nxm_decode_ofs(action->ofs_nbits);
1073 n_bits = nxm_decode_n_bits(action->ofs_nbits);
1074 dst = nxm_field_lookup(ntohl(action->dst));
1075 if (!field_ok(dst, flow, ofs + n_bits)) {
1076 return BAD_ARGUMENT;
1077 }
1078
1079 /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
1080 * action->value. */
1081 if (n_bits < 64 && ntohll(action->value) >> n_bits) {
1082 return BAD_ARGUMENT;
1083 }
1084
1085 if (!dst->writable) {
1086 return BAD_ARGUMENT;
1087 }
1088
1089 return 0;
1090 }
1091 \f
1092 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
1093
/* Returns the value of field 'src' within 'flow', widened to 64 bits and in
 * host byte order.  'src' must be an exact-match (non-_W) field; the
 * wildcarded variants abort via NOT_REACHED(). */
static uint64_t
nxm_read_field(const struct nxm_field *src, const struct flow *flow)
{
    switch (src->index) {
    case NFI_NXM_OF_IN_PORT:
        /* Translate the datapath's local port number back to OpenFlow's. */
        return flow->in_port == ODPP_LOCAL ? OFPP_LOCAL : flow->in_port;

    case NFI_NXM_OF_ETH_DST:
        return eth_addr_to_uint64(flow->dl_dst);

    case NFI_NXM_OF_ETH_SRC:
        return eth_addr_to_uint64(flow->dl_src);

    case NFI_NXM_OF_ETH_TYPE:
        return ntohs(ofputil_dl_type_to_openflow(flow->dl_type));

    case NFI_NXM_OF_VLAN_TCI:
        return ntohs(flow->vlan_tci);

    case NFI_NXM_OF_IP_TOS:
        return flow->nw_tos;

    case NFI_NXM_OF_IP_PROTO:
    case NFI_NXM_OF_ARP_OP:
        /* The ARP opcode shares storage with the IP protocol. */
        return flow->nw_proto;

    case NFI_NXM_OF_IP_SRC:
    case NFI_NXM_OF_ARP_SPA:
        return ntohl(flow->nw_src);

    case NFI_NXM_OF_IP_DST:
    case NFI_NXM_OF_ARP_TPA:
        return ntohl(flow->nw_dst);

    case NFI_NXM_OF_TCP_SRC:
    case NFI_NXM_OF_UDP_SRC:
        return ntohs(flow->tp_src);

    case NFI_NXM_OF_TCP_DST:
    case NFI_NXM_OF_UDP_DST:
        return ntohs(flow->tp_dst);

    case NFI_NXM_OF_ICMP_TYPE:
        /* ICMP type/code live in the low byte of the transport ports. */
        return ntohs(flow->tp_src) & 0xff;

    case NFI_NXM_OF_ICMP_CODE:
        return ntohs(flow->tp_dst) & 0xff;

    case NFI_NXM_NX_TUN_ID:
        return ntohll(flow->tun_id);

/* Expands to a read case for register IDX, plus an aborting case for its
 * wildcarded variant. */
#define NXM_READ_REGISTER(IDX)                  \
    case NFI_NXM_NX_REG##IDX:                   \
        return flow->regs[IDX];                 \
    case NFI_NXM_NX_REG##IDX##_W:               \
        NOT_REACHED();

    NXM_READ_REGISTER(0);
#if FLOW_N_REGS >= 2
    NXM_READ_REGISTER(1);
#endif
#if FLOW_N_REGS >= 3
    NXM_READ_REGISTER(2);
#endif
#if FLOW_N_REGS >= 4
    NXM_READ_REGISTER(3);
#endif
#if FLOW_N_REGS > 4
#error
#endif

    /* Wildcarded fields are never readable. */
    case NFI_NXM_NX_TUN_ID_W:
    case NFI_NXM_OF_ETH_DST_W:
    case NFI_NXM_OF_VLAN_TCI_W:
    case NFI_NXM_OF_IP_SRC_W:
    case NFI_NXM_OF_IP_DST_W:
    case NFI_NXM_OF_ARP_SPA_W:
    case NFI_NXM_OF_ARP_TPA_W:
    case N_NXM_FIELDS:
        NOT_REACHED();
    }

    NOT_REACHED();
}
1178
/* Stores the low-order bits of 'new_value' into field 'dst' within 'flow'.
 * Only the fields handled below (VLAN TCI, tunnel ID, and the registers) are
 * writable; every other field index aborts via NOT_REACHED(). */
static void
nxm_write_field(const struct nxm_field *dst, struct flow *flow,
                uint64_t new_value)
{
    switch (dst->index) {
    case NFI_NXM_OF_VLAN_TCI:
        flow->vlan_tci = htons(new_value);
        break;

    case NFI_NXM_NX_TUN_ID:
        flow->tun_id = htonll(new_value);
        break;

/* Expands to a write case for register IDX, plus an aborting case for its
 * wildcarded variant. */
#define NXM_WRITE_REGISTER(IDX)                 \
    case NFI_NXM_NX_REG##IDX:                   \
        flow->regs[IDX] = new_value;            \
        break;                                  \
    case NFI_NXM_NX_REG##IDX##_W:               \
        NOT_REACHED();

    NXM_WRITE_REGISTER(0);
#if FLOW_N_REGS >= 2
    NXM_WRITE_REGISTER(1);
#endif
#if FLOW_N_REGS >= 3
    NXM_WRITE_REGISTER(2);
#endif
#if FLOW_N_REGS >= 4
    NXM_WRITE_REGISTER(3);
#endif
#if FLOW_N_REGS > 4
#error
#endif

    /* All remaining fields are read-only or wildcarded and must not reach
     * here (nxm_check_reg_{move,load}() verify writability). */
    case NFI_NXM_OF_IN_PORT:
    case NFI_NXM_OF_ETH_DST:
    case NFI_NXM_OF_ETH_SRC:
    case NFI_NXM_OF_ETH_TYPE:
    case NFI_NXM_OF_IP_TOS:
    case NFI_NXM_OF_IP_PROTO:
    case NFI_NXM_OF_ARP_OP:
    case NFI_NXM_OF_IP_SRC:
    case NFI_NXM_OF_ARP_SPA:
    case NFI_NXM_OF_IP_DST:
    case NFI_NXM_OF_ARP_TPA:
    case NFI_NXM_OF_TCP_SRC:
    case NFI_NXM_OF_UDP_SRC:
    case NFI_NXM_OF_TCP_DST:
    case NFI_NXM_OF_UDP_DST:
    case NFI_NXM_OF_ICMP_TYPE:
    case NFI_NXM_OF_ICMP_CODE:
    case NFI_NXM_NX_TUN_ID_W:
    case NFI_NXM_OF_ETH_DST_W:
    case NFI_NXM_OF_VLAN_TCI_W:
    case NFI_NXM_OF_IP_SRC_W:
    case NFI_NXM_OF_IP_DST_W:
    case NFI_NXM_OF_ARP_SPA_W:
    case NFI_NXM_OF_ARP_TPA_W:
    case N_NXM_FIELDS:
        NOT_REACHED();
    }
}
1241
/* Executes the NXAST_REG_MOVE 'action' on 'flow': copies the selected
 * 'n_bits'-wide range of the source field into the selected range of the
 * destination field.  Assumes 'action' was previously validated with
 * nxm_check_reg_move(); otherwise the field lookups here may return null. */
void
nxm_execute_reg_move(const struct nx_action_reg_move *action,
                     struct flow *flow)
{
    /* Preparation. */
    int n_bits = ntohs(action->n_bits);
    /* Guard against UB: shifting a 64-bit value by 64 is undefined. */
    uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;

    /* Get the interesting bits of the source field. */
    const struct nxm_field *src = nxm_field_lookup(ntohl(action->src));
    int src_ofs = ntohs(action->src_ofs);
    uint64_t src_data = nxm_read_field(src, flow) & (mask << src_ofs);

    /* Get the remaining bits of the destination field. */
    const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst));
    int dst_ofs = ntohs(action->dst_ofs);
    uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);

    /* Get the final value. */
    uint64_t new_data = dst_data | ((src_data >> src_ofs) << dst_ofs);

    nxm_write_field(dst, flow, new_data);
}
1265
/* Executes the NXAST_REG_LOAD 'action' on 'flow': stores the action's
 * immediate value into the selected bit range of the destination field.
 * Assumes 'action' was previously validated with nxm_check_reg_load();
 * otherwise the field lookup here may return null. */
void
nxm_execute_reg_load(const struct nx_action_reg_load *action,
                     struct flow *flow)
{
    /* Preparation. */
    int n_bits = nxm_decode_n_bits(action->ofs_nbits);
    /* Guard against UB: shifting a 64-bit value by 64 is undefined. */
    uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;

    /* Get source data. */
    uint64_t src_data = ntohll(action->value);

    /* Get remaining bits of the destination field. */
    const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst));
    int dst_ofs = nxm_decode_ofs(action->ofs_nbits);
    uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);

    /* Get the final value. */
    uint64_t new_data = dst_data | (src_data << dst_ofs);

    nxm_write_field(dst, flow, new_data);
}