]> git.proxmox.com Git - mirror_ovs.git/blob - lib/nx-match.c
ofpbuf: New function ofpbuf_put_hex().
[mirror_ovs.git] / lib / nx-match.c
1 /*
2 * Copyright (c) 2010 Nicira Networks.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "nx-match.h"
20
21 #include "classifier.h"
22 #include "dynamic-string.h"
23 #include "ofp-util.h"
24 #include "ofpbuf.h"
25 #include "openflow/nicira-ext.h"
26 #include "packets.h"
27 #include "unaligned.h"
28 #include "vlog.h"
29
30 VLOG_DEFINE_THIS_MODULE(nx_match);
31
32 /* Rate limit for nx_match parse errors. These always indicate a bug in the
33 * peer and so there's not much point in showing a lot of them. */
34 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
35
/* OpenFlow error codes returned by the nx_match parsing and register-action
 * validation functions below.  Each is a packed (type, code) pair built by
 * OFP_MKERR*() so callers can return it directly to the controller. */
enum {
    NXM_INVALID = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_INVALID),
    NXM_BAD_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_TYPE),
    NXM_BAD_VALUE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_VALUE),
    NXM_BAD_MASK = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_MASK),
    NXM_BAD_PREREQ = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_PREREQ),
    NXM_DUP_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_DUP_TYPE),
    BAD_ARGUMENT = OFP_MKERR(OFPET_BAD_ACTION, OFPBAC_BAD_ARGUMENT)
};
45
/* For each NXM_* field, define NFI_NXM_* as consecutive integers starting from
 * zero.  The field list itself lives in nx-match.def, an X-macro file that is
 * expanded several times in this file with different DEFINE_FIELD macros. */
enum nxm_field_index {
#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO) NFI_NXM_##HEADER,
#include "nx-match.def"
    N_NXM_FIELDS                /* Total number of known fields. */
};
53
/* Run-time description of one NXM field, looked up by header value through
 * the 'all_nxm_fields' hash table. */
struct nxm_field {
    struct hmap_node hmap_node; /* Node in 'all_nxm_fields', keyed on header. */
    enum nxm_field_index index; /* NFI_* value. */
    uint32_t header;            /* NXM_* value. */
    flow_wildcards_t wildcard;  /* FWW_* bit, if exactly one. */
    ovs_be16 dl_type;           /* dl_type prerequisite, if nonzero. */
    uint8_t nw_proto;           /* nw_proto prerequisite, if nonzero. */
    const char *name;           /* "NXM_*" string. */
};
63
/* All the known fields, generated from nx-match.def.  nxm_init() threads
 * these elements into the 'all_nxm_fields' hash table on first use. */
static struct nxm_field nxm_fields[N_NXM_FIELDS] = {
#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO)     \
    { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \
      CONSTANT_HTONS(DL_TYPE), NW_PROTO, "NXM_" #HEADER },
#include "nx-match.def"
};
71
/* Hash table of 'nxm_fields', keyed on hash_int(header, 0).  Populated
 * lazily by nxm_init(). */
static struct hmap all_nxm_fields = HMAP_INITIALIZER(&all_nxm_fields);

/* Possible masks for NXM_OF_ETH_DST_W.  Only these four are accepted:
 * fully wildcarded, exact match, multicast bit only, or everything except
 * the multicast bit. */
static const uint8_t eth_all_0s[ETH_ADDR_LEN]
    = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
static const uint8_t eth_all_1s[ETH_ADDR_LEN]
    = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
static const uint8_t eth_mcast_1[ETH_ADDR_LEN]
    = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
static const uint8_t eth_mcast_0[ETH_ADDR_LEN]
    = {0xfe, 0xff, 0xff, 0xff, 0xff, 0xff};
84
/* Populates 'all_nxm_fields' from 'nxm_fields' on the first call; later
 * calls are no-ops.
 *
 * NOTE(review): no locking here, so this assumes single-threaded use on the
 * first lookup — confirm against the caller's threading model. */
static void
nxm_init(void)
{
    if (hmap_is_empty(&all_nxm_fields)) {
        int i;

        for (i = 0; i < N_NXM_FIELDS; i++) {
            struct nxm_field *f = &nxm_fields[i];
            hmap_insert(&all_nxm_fields, &f->hmap_node,
                        hash_int(f->header, 0));
        }

        /* Verify that the header values are unique (duplicate "case" values
         * cause a compile error). */
        switch (0) {
#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO)  \
        case NXM_##HEADER: break;
#include "nx-match.def"
        }
    }
}
106
/* Returns the nxm_field that describes header value 'header', or a null
 * pointer if 'header' is unknown. */
static const struct nxm_field *
nxm_field_lookup(uint32_t header)
{
    struct nxm_field *f;

    nxm_init();                 /* Ensure the hash table is populated. */

    HMAP_FOR_EACH_WITH_HASH (f, hmap_node, hash_int(header, 0),
                             &all_nxm_fields) {
        if (f->header == header) {
            return f;
        }
    }

    return NULL;
}
123
/* Returns the width of the data for a field with the given 'header', in
 * bytes.  For a field with NXM_HASMASK set, the encoded payload holds a
 * value plus an equal-sized mask, so the data width is half the payload. */
static int
nxm_field_bytes(uint32_t header)
{
    unsigned int payload = NXM_LENGTH(header);
    if (NXM_HASMASK(header)) {
        payload /= 2;
    }
    return payload;
}
132
/* Returns the width of the data for a field with the given 'header', in
 * bits. */
static int
nxm_field_bits(uint32_t header)
{
    const int bits_per_byte = 8;
    return bits_per_byte * nxm_field_bytes(header);
}
140 \f
141 /* nx_pull_match() and helpers. */
142
/* Stores the register match described by nxm_field 'f' into 'flow' and 'wc'.
 * 'value' points to the 4-byte register value; 'maskp' points to the 4-byte
 * mask, which is consulted only when 'f' has NXM_HASMASK set (otherwise an
 * exact match is assumed).
 *
 * Returns 0 on success, NXM_DUP_TYPE if this register was already matched
 * by an earlier nxm_entry. */
static int
parse_nx_reg(const struct nxm_field *f,
             struct flow *flow, struct flow_wildcards *wc,
             const void *value, const void *maskp)
{
    int idx = NXM_NX_REG_IDX(f->header);
    if (wc->reg_masks[idx]) {
        /* A nonzero mask means a previous nxm_entry already set this
         * register. */
        return NXM_DUP_TYPE;
    } else {
        flow_wildcards_set_reg_mask(wc, idx,
                                    (NXM_HASMASK(f->header)
                                     ? ntohl(get_unaligned_be32(maskp))
                                     : UINT32_MAX));
        flow->regs[idx] = ntohl(get_unaligned_be32(value));
        /* Canonical form: masked-out bits of the value are zero. */
        flow->regs[idx] &= wc->reg_masks[idx];
        return 0;
    }
}
161
162 static int
163 parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f,
164 const void *value, const void *mask)
165 {
166 struct flow_wildcards *wc = &rule->wc;
167 struct flow *flow = &rule->flow;
168
169 switch (f->index) {
170 /* Metadata. */
171 case NFI_NXM_OF_IN_PORT:
172 flow->in_port = ntohs(get_unaligned_be16(value));
173 if (flow->in_port == OFPP_LOCAL) {
174 flow->in_port = ODPP_LOCAL;
175 }
176 return 0;
177
178 /* Ethernet header. */
179 case NFI_NXM_OF_ETH_DST:
180 if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
181 != (FWW_DL_DST | FWW_ETH_MCAST)) {
182 return NXM_DUP_TYPE;
183 } else {
184 wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
185 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
186 return 0;
187 }
188 case NFI_NXM_OF_ETH_DST_W:
189 if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
190 != (FWW_DL_DST | FWW_ETH_MCAST)) {
191 return NXM_DUP_TYPE;
192 } else if (eth_addr_equals(mask, eth_mcast_1)) {
193 wc->wildcards &= ~FWW_ETH_MCAST;
194 flow->dl_dst[0] = *(uint8_t *) value & 0x01;
195 } else if (eth_addr_equals(mask, eth_mcast_0)) {
196 wc->wildcards &= ~FWW_DL_DST;
197 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
198 flow->dl_dst[0] &= 0xfe;
199 } else if (eth_addr_equals(mask, eth_all_0s)) {
200 return 0;
201 } else if (eth_addr_equals(mask, eth_all_1s)) {
202 wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
203 memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
204 return 0;
205 } else {
206 return NXM_BAD_MASK;
207 }
208 case NFI_NXM_OF_ETH_SRC:
209 memcpy(flow->dl_src, value, ETH_ADDR_LEN);
210 return 0;
211 case NFI_NXM_OF_ETH_TYPE:
212 flow->dl_type = get_unaligned_be16(value);
213 return 0;
214
215 /* 802.1Q header. */
216 case NFI_NXM_OF_VLAN_TCI:
217 if (wc->vlan_tci_mask) {
218 return NXM_DUP_TYPE;
219 } else {
220 cls_rule_set_dl_tci(rule, get_unaligned_be16(value));
221 return 0;
222 }
223 case NFI_NXM_OF_VLAN_TCI_W:
224 if (wc->vlan_tci_mask) {
225 return NXM_DUP_TYPE;
226 } else {
227 cls_rule_set_dl_tci_masked(rule, get_unaligned_be16(value),
228 get_unaligned_be16(mask));
229 return 0;
230 }
231
232 /* IP header. */
233 case NFI_NXM_OF_IP_TOS:
234 if (*(uint8_t *) value & 0x03) {
235 return NXM_BAD_VALUE;
236 } else {
237 flow->nw_tos = *(uint8_t *) value;
238 return 0;
239 }
240 case NFI_NXM_OF_IP_PROTO:
241 flow->nw_proto = *(uint8_t *) value;
242 return 0;
243
244 /* IP addresses in IP and ARP headers. */
245 case NFI_NXM_OF_IP_SRC:
246 case NFI_NXM_OF_ARP_SPA:
247 if (wc->nw_src_mask) {
248 return NXM_DUP_TYPE;
249 } else {
250 cls_rule_set_nw_src(rule, get_unaligned_be32(value));
251 return 0;
252 }
253 case NFI_NXM_OF_IP_SRC_W:
254 case NFI_NXM_OF_ARP_SPA_W:
255 if (wc->nw_src_mask) {
256 return NXM_DUP_TYPE;
257 } else {
258 ovs_be32 ip = get_unaligned_be32(value);
259 ovs_be32 netmask = get_unaligned_be32(mask);
260 if (!cls_rule_set_nw_src_masked(rule, ip, netmask)) {
261 return NXM_BAD_MASK;
262 }
263 return 0;
264 }
265 case NFI_NXM_OF_IP_DST:
266 case NFI_NXM_OF_ARP_TPA:
267 if (wc->nw_dst_mask) {
268 return NXM_DUP_TYPE;
269 } else {
270 cls_rule_set_nw_dst(rule, get_unaligned_be32(value));
271 return 0;
272 }
273 case NFI_NXM_OF_IP_DST_W:
274 case NFI_NXM_OF_ARP_TPA_W:
275 if (wc->nw_dst_mask) {
276 return NXM_DUP_TYPE;
277 } else {
278 ovs_be32 ip = get_unaligned_be32(value);
279 ovs_be32 netmask = get_unaligned_be32(mask);
280 if (!cls_rule_set_nw_dst_masked(rule, ip, netmask)) {
281 return NXM_BAD_MASK;
282 }
283 return 0;
284 }
285
286 /* TCP header. */
287 case NFI_NXM_OF_TCP_SRC:
288 flow->tp_src = get_unaligned_be16(value);
289 return 0;
290 case NFI_NXM_OF_TCP_DST:
291 flow->tp_dst = get_unaligned_be16(value);
292 return 0;
293
294 /* UDP header. */
295 case NFI_NXM_OF_UDP_SRC:
296 flow->tp_src = get_unaligned_be16(value);
297 return 0;
298 case NFI_NXM_OF_UDP_DST:
299 flow->tp_dst = get_unaligned_be16(value);
300 return 0;
301
302 /* ICMP header. */
303 case NFI_NXM_OF_ICMP_TYPE:
304 flow->tp_src = htons(*(uint8_t *) value);
305 return 0;
306 case NFI_NXM_OF_ICMP_CODE:
307 flow->tp_dst = htons(*(uint8_t *) value);
308 return 0;
309
310 /* ARP header. */
311 case NFI_NXM_OF_ARP_OP:
312 if (ntohs(get_unaligned_be16(value)) > 255) {
313 return NXM_BAD_VALUE;
314 } else {
315 flow->nw_proto = ntohs(get_unaligned_be16(value));
316 return 0;
317 }
318
319 /* Tunnel ID. */
320 case NFI_NXM_NX_TUN_ID:
321 flow->tun_id = htonl(ntohll(get_unaligned_be64(value)));
322 return 0;
323
324 /* Registers. */
325 case NFI_NXM_NX_REG0:
326 case NFI_NXM_NX_REG0_W:
327 #if FLOW_N_REGS >= 2
328 case NFI_NXM_NX_REG1:
329 case NFI_NXM_NX_REG1_W:
330 #endif
331 #if FLOW_N_REGS >= 3
332 case NFI_NXM_NX_REG2:
333 case NFI_NXM_NX_REG2_W:
334 #endif
335 #if FLOW_N_REGS >= 4
336 case NFI_NXM_NX_REG3:
337 case NFI_NXM_NX_REG3_W:
338 #endif
339 #if FLOW_N_REGS > 4
340 #error
341 #endif
342 return parse_nx_reg(f, flow, wc, value, mask);
343
344 case N_NXM_FIELDS:
345 NOT_REACHED();
346 }
347 NOT_REACHED();
348 }
349
350 static bool
351 nxm_prereqs_ok(const struct nxm_field *field, const struct flow *flow)
352 {
353 return (!field->dl_type
354 || (field->dl_type == flow->dl_type
355 && (!field->nw_proto || field->nw_proto == flow->nw_proto)));
356 }
357
/* Validates the nxm_entry at the front of 'p', where 'match_len' bytes of
 * nx_match remain.  Returns the entry's header, in host byte order and
 * always nonzero, if the header is complete and its advertised payload fits
 * within 'match_len'; otherwise returns 0.  (A return of 0 with
 * 'match_len' == 0 indicates a clean end of the match.) */
static uint32_t
nx_entry_ok(const void *p, unsigned int match_len)
{
    unsigned int payload_len;
    ovs_be32 header_be;
    uint32_t header;

    if (match_len < 4) {
        if (match_len) {
            VLOG_DBG_RL(&rl, "nx_match ends with partial nxm_header");
        }
        return 0;
    }
    /* memcpy() because 'p' is not guaranteed to be 32-bit aligned. */
    memcpy(&header_be, p, 4);
    header = ntohl(header_be);

    payload_len = NXM_LENGTH(header);
    if (!payload_len) {
        VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
                    "length 0", header);
        return 0;
    }
    if (match_len < payload_len + 4) {
        VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
                    "%u bytes left in nx_match", payload_len + 4, match_len);
        return 0;
    }

    return header;
}
388
389 int
390 nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority,
391 struct cls_rule *rule)
392 {
393 uint32_t header;
394 uint8_t *p;
395
396 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
397 if (!p) {
398 VLOG_DBG_RL(&rl, "nx_match length %zu, rounded up to a "
399 "multiple of 8, is longer than space in message (max "
400 "length %zu)", match_len, b->size);
401 return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
402 }
403
404 cls_rule_init_catchall(rule, priority);
405 while ((header = nx_entry_ok(p, match_len)) != 0) {
406 unsigned length = NXM_LENGTH(header);
407 const struct nxm_field *f;
408 int error;
409
410 f = nxm_field_lookup(header);
411 if (!f) {
412 error = NXM_BAD_TYPE;
413 } else if (!nxm_prereqs_ok(f, &rule->flow)) {
414 error = NXM_BAD_PREREQ;
415 } else if (f->wildcard && !(rule->wc.wildcards & f->wildcard)) {
416 error = NXM_DUP_TYPE;
417 } else {
418 /* 'hasmask' and 'length' are known to be correct at this point
419 * because they are included in 'header' and nxm_field_lookup()
420 * checked them already. */
421 rule->wc.wildcards &= ~f->wildcard;
422 error = parse_nxm_entry(rule, f, p + 4, p + 4 + length / 2);
423 }
424 if (error) {
425 VLOG_DBG_RL(&rl, "bad nxm_entry with vendor=%"PRIu32", "
426 "field=%"PRIu32", hasmask=%"PRIu32", type=%"PRIu32" "
427 "(error %x)",
428 NXM_VENDOR(header), NXM_FIELD(header),
429 NXM_HASMASK(header), NXM_TYPE(header),
430 error);
431 return error;
432 }
433
434
435 p += 4 + length;
436 match_len -= 4 + length;
437 }
438
439 return match_len ? NXM_INVALID : 0;
440 }
441 \f
442 /* nx_put_match() and helpers.
443 *
444 * 'put' functions whose names end in 'w' add a wildcarded field.
445 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
446 * Other 'put' functions add exact-match fields.
447 */
448
449 static void
450 nxm_put_header(struct ofpbuf *b, uint32_t header)
451 {
452 ovs_be32 n_header = htonl(header);
453 ofpbuf_put(b, &n_header, sizeof n_header);
454 }
455
/* Appends to 'b' an exact-match nxm_entry with the given 'header' and a
 * 1-byte 'value'. */
static void
nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}
462
/* Appends to 'b' an exact-match nxm_entry with the given 'header' and
 * 2-byte 'value' (already in network byte order). */
static void
nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}
469
470 static void
471 nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
472 {
473 nxm_put_header(b, header);
474 ofpbuf_put(b, &value, sizeof value);
475 ofpbuf_put(b, &mask, sizeof mask);
476 }
477
478 static void
479 nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
480 {
481 switch (mask) {
482 case 0:
483 break;
484
485 case CONSTANT_HTONS(UINT16_MAX):
486 nxm_put_16(b, header, value);
487 break;
488
489 default:
490 nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
491 break;
492 }
493 }
494
/* Appends to 'b' an exact-match nxm_entry with the given 'header' and
 * 4-byte 'value' (already in network byte order). */
static void
nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}
501
502 static void
503 nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
504 {
505 nxm_put_header(b, header);
506 ofpbuf_put(b, &value, sizeof value);
507 ofpbuf_put(b, &mask, sizeof mask);
508 }
509
510 static void
511 nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
512 {
513 switch (mask) {
514 case 0:
515 break;
516
517 case CONSTANT_HTONL(UINT32_MAX):
518 nxm_put_32(b, header, value);
519 break;
520
521 default:
522 nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
523 break;
524 }
525 }
526
/* Appends to 'b' an exact-match nxm_entry with the given 'header' and
 * 8-byte 'value' (already in network byte order). */
static void
nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}
533
/* Appends to 'b' an exact-match nxm_entry with the given 'header' and
 * Ethernet address 'value'. */
static void
nxm_put_eth(struct ofpbuf *b, uint32_t header,
            const uint8_t value[ETH_ADDR_LEN])
{
    nxm_put_header(b, header);
    ofpbuf_put(b, value, ETH_ADDR_LEN);
}
541
542 static void
543 nxm_put_eth_dst(struct ofpbuf *b,
544 uint32_t wc, const uint8_t value[ETH_ADDR_LEN])
545 {
546 switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) {
547 case FWW_DL_DST | FWW_ETH_MCAST:
548 break;
549 case FWW_DL_DST:
550 nxm_put_header(b, NXM_OF_ETH_DST_W);
551 ofpbuf_put(b, value, ETH_ADDR_LEN);
552 ofpbuf_put(b, eth_mcast_1, ETH_ADDR_LEN);
553 break;
554 case FWW_ETH_MCAST:
555 nxm_put_header(b, NXM_OF_ETH_DST_W);
556 ofpbuf_put(b, value, ETH_ADDR_LEN);
557 ofpbuf_put(b, eth_mcast_0, ETH_ADDR_LEN);
558 break;
559 case 0:
560 nxm_put_eth(b, NXM_OF_ETH_DST, value);
561 break;
562 }
563 }
564
/* Appends to 'b' an nx_match that expresses 'cr', then zero-pads 'b' to a
 * multiple of 8 bytes.  Returns the number of bytes appended, excluding the
 * padding. */
int
nx_put_match(struct ofpbuf *b, const struct cls_rule *cr)
{
    const flow_wildcards_t wc = cr->wc.wildcards;
    const struct flow *flow = &cr->flow;
    const size_t start_len = b->size;
    int match_len;
    int i;

    /* Metadata. */
    if (!(wc & FWW_IN_PORT)) {
        uint16_t in_port = flow->in_port;
        if (in_port == ODPP_LOCAL) {
            /* Translate the datapath's local port to OpenFlow's. */
            in_port = OFPP_LOCAL;
        }
        nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port));
    }

    /* Ethernet. */
    nxm_put_eth_dst(b, wc, flow->dl_dst);
    if (!(wc & FWW_DL_SRC)) {
        nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src);
    }
    if (!(wc & FWW_DL_TYPE)) {
        nxm_put_16(b, NXM_OF_ETH_TYPE, flow->dl_type);
    }

    /* 802.1Q. */
    nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask);

    /* L3. */
    if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) {
        /* IP.  The low two TOS bits are masked off: NXM_OF_IP_TOS requires
         * them to be zero. */
        if (!(wc & FWW_NW_TOS)) {
            nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc);
        }
        nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask);
        nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask);

        if (!(wc & FWW_NW_PROTO)) {
            nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
            switch (flow->nw_proto) {
                /* TCP. */
            case IP_TYPE_TCP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src);
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst);
                }
                break;

                /* UDP. */
            case IP_TYPE_UDP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src);
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst);
                }
                break;

                /* ICMP.  Type and code are stored in tp_src and tp_dst. */
            case IP_TYPE_ICMP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_8(b, NXM_OF_ICMP_TYPE, ntohs(flow->tp_src));
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_8(b, NXM_OF_ICMP_CODE, ntohs(flow->tp_dst));
                }
                break;
            }
        }
    } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
        /* ARP.  The opcode is stored in nw_proto. */
        if (!(wc & FWW_NW_PROTO)) {
            nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto));
        }
        nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask);
        nxm_put_32m(b, NXM_OF_ARP_TPA, flow->nw_dst, cr->wc.nw_dst_mask);
    }

    /* Tunnel ID.  Stored as 32 bits but transmitted as 64. */
    if (!(wc & FWW_TUN_ID)) {
        nxm_put_64(b, NXM_NX_TUN_ID, htonll(ntohl(flow->tun_id)));
    }

    /* Registers. */
    for (i = 0; i < FLOW_N_REGS; i++) {
        nxm_put_32m(b, NXM_NX_REG(i),
                    htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i]));
    }

    match_len = b->size - start_len;
    ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
    return match_len;
}
662 \f
663 /* nx_match_to_string() and helpers. */
664
665 static void format_nxm_field_name(struct ds *, uint32_t header);
666
/* Returns a malloc()'d human-readable representation of the 'match_len'
 * bytes of nx_match at 'p', e.g. "NXM_OF_ETH_TYPE(0800), ...".  Masked
 * fields are printed as "value/mask".  Trailing invalid bytes are reported
 * inside the string rather than treated as an error.  The caller must free
 * the returned string. */
char *
nx_match_to_string(const uint8_t *p, unsigned int match_len)
{
    uint32_t header;
    struct ds s;

    if (!match_len) {
        return xstrdup("<any>");
    }

    ds_init(&s);
    while ((header = nx_entry_ok(p, match_len)) != 0) {
        unsigned int length = NXM_LENGTH(header);
        unsigned int value_len = nxm_field_bytes(header);
        const uint8_t *value = p + 4;
        const uint8_t *mask = value + value_len;
        unsigned int i;

        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        format_nxm_field_name(&s, header);
        ds_put_char(&s, '(');

        for (i = 0; i < value_len; i++) {
            ds_put_format(&s, "%02x", value[i]);
        }
        if (NXM_HASMASK(header)) {
            ds_put_char(&s, '/');
            for (i = 0; i < value_len; i++) {
                ds_put_format(&s, "%02x", mask[i]);
            }
        }
        ds_put_char(&s, ')');

        p += 4 + length;
        match_len -= 4 + length;
    }

    if (match_len) {
        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        ds_put_format(&s, "<%u invalid bytes>", match_len);
    }

    return ds_steal_cstr(&s);
}
717
718 static void
719 format_nxm_field_name(struct ds *s, uint32_t header)
720 {
721 const struct nxm_field *f = nxm_field_lookup(header);
722 if (f) {
723 ds_put_cstr(s, f->name);
724 } else {
725 ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
726 }
727 }
728
729 static uint32_t
730 parse_nxm_field_name(const char *name, int name_len)
731 {
732 const struct nxm_field *f;
733
734 /* Check whether it's a field name. */
735 for (f = nxm_fields; f < &nxm_fields[ARRAY_SIZE(nxm_fields)]; f++) {
736 if (!strncmp(f->name, name, name_len) && f->name[name_len] == '\0') {
737 return f->header;
738 }
739 }
740
741 /* Check whether it's a 32-bit field header value as hex.
742 * (This isn't ordinarily useful except for testing error behavior.) */
743 if (name_len == 8) {
744 uint32_t header = hexits_value(name, name_len, NULL);
745 if (header != UINT_MAX) {
746 return header;
747 }
748 }
749
750 return 0;
751 }
752 \f
753 /* nx_match_from_string(). */
754
/* Parses the string representation 's' of an nx_match (as produced by
 * nx_match_to_string()) and appends the binary encoding to 'b', zero-padding
 * to a multiple of 8 bytes.  Returns the match length excluding padding.
 * Calls ovs_fatal() on parse errors, so this is suitable only for
 * command-line utilities. */
int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    const char *full_s = s;
    const size_t start_len = b->size;
    int match_len;

    if (!strcmp(s, "<any>")) {
        /* Ensure that 'b->data' isn't actually null. */
        ofpbuf_prealloc_tailroom(b, 1);
        return 0;
    }

    for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
        const char *name;
        uint32_t header;
        int name_len;
        size_t n;

        name = s;
        name_len = strcspn(s, "(");
        if (s[name_len] != '(') {
            ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
        }

        header = parse_nxm_field_name(name, name_len);
        if (!header) {
            ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
        }

        s += name_len + 1;      /* Skip the name and '('. */

        nxm_put_header(b, header);
        s = ofpbuf_put_hex(b, s, &n);
        if (n != nxm_field_bytes(header)) {
            ovs_fatal(0, "%.2s: hex digits expected", s);
        }
        if (NXM_HASMASK(header)) {
            /* A masked field requires "value/mask". */
            s += strspn(s, " ");
            if (*s != '/') {
                ovs_fatal(0, "%s: missing / in masked field %.*s",
                          full_s, name_len, name);
            }
            s = ofpbuf_put_hex(b, s + 1, &n);
            if (n != nxm_field_bytes(header)) {
                ovs_fatal(0, "%.2s: hex digits expected", s);
            }
        }

        s += strspn(s, " ");
        if (*s != ')') {
            ovs_fatal(0, "%s: missing ) following field %.*s",
                      full_s, name_len, name);
        }
        s++;
    }

    match_len = b->size - start_len;
    ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
    return match_len;
}
816 \f
/* Parses a field-and-bit-range specification of the form NAME[],
 * NAME[<bit>], or NAME[<start>..<end>] at the beginning of 's'.  Stores the
 * field's header in '*headerp', the starting bit offset in '*ofsp', and the
 * number of bits in '*n_bitsp'.  Returns a pointer just past the closing
 * ']'.  Calls ovs_fatal() on parse errors, so this is suitable only for
 * command-line utilities.
 *
 * Fix: sscanf() can report a successful conversion count without having
 * matched the trailing ']' literal (e.g. "REG0[0..5"), in which case the old
 * unchecked "strchr(s, ']') + 1" computed NULL + 1 — undefined behavior.
 * The closing bracket is now checked explicitly. */
static const char *
parse_nxm_field_bits(const char *s, uint32_t *headerp, int *ofsp, int *n_bitsp)
{
    const char *full_s = s;
    const char *name;
    const char *close;
    uint32_t header;
    int start, end;
    int name_len;
    int width;

    name = s;
    name_len = strcspn(s, "[");
    if (s[name_len] != '[') {
        ovs_fatal(0, "%s: missing [ looking for field name", full_s);
    }

    header = parse_nxm_field_name(name, name_len);
    if (!header) {
        ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
    }
    width = nxm_field_bits(header);

    s += name_len;
    if (sscanf(s, "[%d..%d]", &start, &end) == 2) {
        /* Nothing to do. */
    } else if (sscanf(s, "[%d]", &start) == 1) {
        end = start;
    } else if (!strncmp(s, "[]", 2)) {
        start = 0;
        end = width - 1;
    } else {
        ovs_fatal(0, "%s: syntax error expecting [] or [<bit>] or "
                  "[<start>..<end>]", full_s);
    }
    close = strchr(s, ']');
    if (!close) {
        ovs_fatal(0, "%s: missing ] in field bit range", full_s);
    }
    s = close + 1;

    if (start > end) {
        ovs_fatal(0, "%s: starting bit %d is after ending bit %d",
                  full_s, start, end);
    } else if (start >= width) {
        ovs_fatal(0, "%s: starting bit %d is not valid because field is only "
                  "%d bits wide", full_s, start, width);
    } else if (end >= width){
        ovs_fatal(0, "%s: ending bit %d is not valid because field is only "
                  "%d bits wide", full_s, end, width);
    }

    *headerp = header;
    *ofsp = start;
    *n_bitsp = end - start + 1;

    return s;
}
870
/* Parses 's' (e.g. "NXM_NX_REG0[0..5]->NXM_NX_REG1[26..31]") into 'move'.
 * Calls ovs_fatal() on parse errors, so this is suitable only for
 * command-line utilities. */
void
nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s)
{
    const char *full_s = s;
    uint32_t src, dst;
    int src_ofs, dst_ofs;
    int src_n_bits, dst_n_bits;

    s = parse_nxm_field_bits(s, &src, &src_ofs, &src_n_bits);
    if (strncmp(s, "->", 2)) {
        ovs_fatal(0, "%s: missing `->' following source", full_s);
    }
    s += 2;
    s = parse_nxm_field_bits(s, &dst, &dst_ofs, &dst_n_bits);
    if (*s != '\0') {
        ovs_fatal(0, "%s: trailing garbage following destination", full_s);
    }

    /* Source and destination bit ranges must be the same width. */
    if (src_n_bits != dst_n_bits) {
        ovs_fatal(0, "%s: source field is %d bits wide but destination is "
                  "%d bits wide", full_s, src_n_bits, dst_n_bits);
    }

    move->type = htons(OFPAT_VENDOR);
    move->len = htons(sizeof *move);
    move->vendor = htonl(NX_VENDOR_ID);
    move->subtype = htons(NXAST_REG_MOVE);
    move->n_bits = htons(src_n_bits);
    move->src_ofs = htons(src_ofs);
    move->dst_ofs = htons(dst_ofs);
    move->src = htonl(src);
    move->dst = htonl(dst);
}
904
905 void
906 nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s)
907 {
908 const char *full_s = s;
909 uint32_t dst;
910 int ofs, n_bits;
911 uint64_t value;
912
913 value = strtoull(s, (char **) &s, 0);
914 if (strncmp(s, "->", 2)) {
915 ovs_fatal(0, "%s: missing `->' following value", full_s);
916 }
917 s += 2;
918 s = parse_nxm_field_bits(s, &dst, &ofs, &n_bits);
919 if (*s != '\0') {
920 ovs_fatal(0, "%s: trailing garbage following destination", full_s);
921 }
922
923 if (n_bits < 64 && (value >> n_bits) != 0) {
924 ovs_fatal(0, "%s: value %llu does not fit into %d bits",
925 full_s, value, n_bits);
926 }
927
928 load->type = htons(OFPAT_VENDOR);
929 load->len = htons(sizeof *load);
930 load->vendor = htonl(NX_VENDOR_ID);
931 load->subtype = htons(NXAST_REG_LOAD);
932 load->ofs_nbits = htons((ofs << 6) | (n_bits - 1));
933 load->dst = htonl(dst);
934 load->value = htonll(value);
935 }
936 \f
937 /* nxm_format_reg_move(), nxm_format_reg_load(). */
938
/* Appends to 's' the field name for 'header' followed by the bit range
 * "[ofs]" (single bit) or "[ofs..end]" (multiple bits). */
static void
format_nxm_field_bits(struct ds *s, uint32_t header, int ofs, int n_bits)
{
    format_nxm_field_name(s, header);
    if (n_bits == 1) {
        ds_put_format(s, "[%d]", ofs);
    } else {
        ds_put_format(s, "[%d..%d]", ofs, ofs + n_bits - 1);
    }
}
949
950 void
951 nxm_format_reg_move(const struct nx_action_reg_move *move, struct ds *s)
952 {
953 int n_bits = ntohs(move->n_bits);
954 int src_ofs = ntohs(move->src_ofs);
955 int dst_ofs = ntohs(move->dst_ofs);
956 uint32_t src = ntohl(move->src);
957 uint32_t dst = ntohl(move->dst);
958
959 ds_put_format(s, "move:");
960 format_nxm_field_bits(s, src, src_ofs, n_bits);
961 ds_put_cstr(s, "->");
962 format_nxm_field_bits(s, dst, dst_ofs, n_bits);
963 }
964
965 void
966 nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s)
967 {
968 uint16_t ofs_nbits = ntohs(load->ofs_nbits);
969 int ofs = ofs_nbits >> 6;
970 int n_bits = (ofs_nbits & 0x3f) + 1;
971 uint32_t dst = ntohl(load->dst);
972 uint64_t value = ntohll(load->value);
973
974 ds_put_format(s, "load:%"PRIu64"->", value);
975 format_nxm_field_bits(s, dst, ofs, n_bits);
976 }
977 \f
978 /* nxm_check_reg_move(), nxm_check_reg_load(). */
979
980 static bool
981 field_ok(const struct nxm_field *f, const struct flow *flow, int size)
982 {
983 return (f && !NXM_HASMASK(f->header)
984 && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header));
985 }
986
/* Validates an NXAST_REG_MOVE 'action' against 'flow': the move must have
 * nonzero width, source and destination must be known maskless fields whose
 * prerequisites 'flow' satisfies and whose bit ranges are in bounds, and the
 * destination must be writable.  Returns 0 if valid, otherwise
 * BAD_ARGUMENT. */
int
nxm_check_reg_move(const struct nx_action_reg_move *action,
                   const struct flow *flow)
{
    const struct nxm_field *src;
    const struct nxm_field *dst;

    if (action->n_bits == htons(0)) {
        /* A zero-width move is meaningless. */
        return BAD_ARGUMENT;
    }

    src = nxm_field_lookup(ntohl(action->src));
    if (!field_ok(src, flow, ntohs(action->src_ofs) + ntohs(action->n_bits))) {
        return BAD_ARGUMENT;
    }

    dst = nxm_field_lookup(ntohl(action->dst));
    if (!field_ok(dst, flow, ntohs(action->dst_ofs) + ntohs(action->n_bits))) {
        return BAD_ARGUMENT;
    }

    /* Only registers, the VLAN TCI, and the tunnel ID may be written. */
    if (!NXM_IS_NX_REG(dst->header)
        && dst->header != NXM_OF_VLAN_TCI
        && dst->header != NXM_NX_TUN_ID) {
        return BAD_ARGUMENT;
    }

    return 0;
}
1016
/* Validates an NXAST_REG_LOAD 'action' against 'flow': the destination must
 * be a known maskless register field whose prerequisites 'flow' satisfies,
 * the bit range must be in bounds, and the value must fit in that range.
 * Returns 0 if valid, otherwise BAD_ARGUMENT. */
int
nxm_check_reg_load(const struct nx_action_reg_load *action,
                   const struct flow *flow)
{
    const struct nxm_field *dst;
    int ofs, n_bits;

    /* 'ofs_nbits' packs the offset in the high 10 bits and (n_bits - 1) in
     * the low 6 bits. */
    ofs = ntohs(action->ofs_nbits) >> 6;
    n_bits = (ntohs(action->ofs_nbits) & 0x3f) + 1;
    dst = nxm_field_lookup(ntohl(action->dst));
    if (!field_ok(dst, flow, ofs + n_bits)) {
        return BAD_ARGUMENT;
    }

    /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
     * action->value. */
    if (n_bits < 64 && ntohll(action->value) >> n_bits) {
        return BAD_ARGUMENT;
    }

    if (!NXM_IS_NX_REG(dst->header)) {
        return BAD_ARGUMENT;
    }

    return 0;
}
1043 \f
1044 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
1045
/* Returns the value of field 'src' in 'flow', converted to host byte order
 * and zero-extended to 64 bits.  'src' must be an exact-match field:
 * wildcarded (_W) variants abort via NOT_REACHED(). */
static uint64_t
nxm_read_field(const struct nxm_field *src, const struct flow *flow)
{
    switch (src->index) {
    case NFI_NXM_OF_IN_PORT:
        /* Report the OpenFlow port number, not the datapath's. */
        return flow->in_port == ODPP_LOCAL ? OFPP_LOCAL : flow->in_port;

    case NFI_NXM_OF_ETH_DST:
        return eth_addr_to_uint64(flow->dl_dst);

    case NFI_NXM_OF_ETH_SRC:
        return eth_addr_to_uint64(flow->dl_src);

    case NFI_NXM_OF_ETH_TYPE:
        return ntohs(flow->dl_type);

    case NFI_NXM_OF_VLAN_TCI:
        return ntohs(flow->vlan_tci);

    case NFI_NXM_OF_IP_TOS:
        return flow->nw_tos;

    case NFI_NXM_OF_IP_PROTO:
    case NFI_NXM_OF_ARP_OP:
        /* Both share 'nw_proto' in the flow. */
        return flow->nw_proto;

    case NFI_NXM_OF_IP_SRC:
    case NFI_NXM_OF_ARP_SPA:
        return ntohl(flow->nw_src);

    case NFI_NXM_OF_IP_DST:
    case NFI_NXM_OF_ARP_TPA:
        return ntohl(flow->nw_dst);

    case NFI_NXM_OF_TCP_SRC:
    case NFI_NXM_OF_UDP_SRC:
        return ntohs(flow->tp_src);

    case NFI_NXM_OF_TCP_DST:
    case NFI_NXM_OF_UDP_DST:
        return ntohs(flow->tp_dst);

    case NFI_NXM_OF_ICMP_TYPE:
        return ntohs(flow->tp_src) & 0xff;

    case NFI_NXM_OF_ICMP_CODE:
        return ntohs(flow->tp_dst) & 0xff;

    case NFI_NXM_NX_TUN_ID:
        return ntohl(flow->tun_id);

/* Expands to a pair of cases: exact-match registers return the stored value;
 * the wildcarded variants must never reach here. */
#define NXM_READ_REGISTER(IDX)                  \
    case NFI_NXM_NX_REG##IDX:                   \
        return flow->regs[IDX];                 \
    case NFI_NXM_NX_REG##IDX##_W:               \
        NOT_REACHED();

    NXM_READ_REGISTER(0);
#if FLOW_N_REGS >= 2
    NXM_READ_REGISTER(1);
#endif
#if FLOW_N_REGS >= 3
    NXM_READ_REGISTER(2);
#endif
#if FLOW_N_REGS >= 4
    NXM_READ_REGISTER(3);
#endif
#if FLOW_N_REGS > 4
#error
#endif

    case NFI_NXM_OF_ETH_DST_W:
    case NFI_NXM_OF_VLAN_TCI_W:
    case NFI_NXM_OF_IP_SRC_W:
    case NFI_NXM_OF_IP_DST_W:
    case NFI_NXM_OF_ARP_SPA_W:
    case NFI_NXM_OF_ARP_TPA_W:
    case N_NXM_FIELDS:
        NOT_REACHED();
    }

    NOT_REACHED();
}
1129
/* Executes an NXAST_REG_MOVE 'action' on 'flow'.  Assumes that the action
 * was already accepted by nxm_check_reg_move(), so the field lookups cannot
 * return null and the bit ranges are in bounds. */
void
nxm_execute_reg_move(const struct nx_action_reg_move *action,
                     struct flow *flow)
{
    /* Preparation. */
    int n_bits = ntohs(action->n_bits);
    uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;

    /* Get the interesting bits of the source field. */
    const struct nxm_field *src = nxm_field_lookup(ntohl(action->src));
    int src_ofs = ntohs(action->src_ofs);
    uint64_t src_data = nxm_read_field(src, flow) & (mask << src_ofs);

    /* Get the remaining bits of the destination field. */
    const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst));
    int dst_ofs = ntohs(action->dst_ofs);
    uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);

    /* Get the final value. */
    uint64_t new_data = dst_data | ((src_data >> src_ofs) << dst_ofs);

    /* Store the result into the only destinations that validation allows:
     * a register, the VLAN TCI, or the tunnel ID. */
    if (NXM_IS_NX_REG(dst->header)) {
        flow->regs[NXM_NX_REG_IDX(dst->header)] = new_data;
    } else if (dst->header == NXM_OF_VLAN_TCI) {
        flow->vlan_tci = htons(new_data);
    } else if (dst->header == NXM_NX_TUN_ID) {
        flow->tun_id = htonl(new_data);
    } else {
        NOT_REACHED();
    }
}
1162
/* Executes an NXAST_REG_LOAD 'action' on 'flow'.  Assumes that the action
 * was already accepted by nxm_check_reg_load(), so the destination is a
 * register.
 *
 * NOTE(review): the 32-bit shift and the truncation of 'value' below are
 * safe only if validation bounds ofs + n_bits by the register width (32
 * bits) — this follows from field_ok() provided register fields are 4 bytes
 * wide; confirm against nicira-ext.h. */
void
nxm_execute_reg_load(const struct nx_action_reg_load *action,
                     struct flow *flow)
{
    /* Preparation. */
    int n_bits = (ntohs(action->ofs_nbits) & 0x3f) + 1;
    uint32_t mask = n_bits == 32 ? UINT32_MAX : (UINT32_C(1) << n_bits) - 1;
    uint32_t *reg = &flow->regs[NXM_NX_REG_IDX(ntohl(action->dst))];

    /* Get source data. */
    uint32_t src_data = ntohll(action->value);

    /* Get remaining bits of the destination field. */
    int dst_ofs = ntohs(action->ofs_nbits) >> 6;
    uint32_t dst_data = *reg & ~(mask << dst_ofs);

    *reg = dst_data | (src_data << dst_ofs);
}