/*
 * Copyright (c) 2010, 2011 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "nx-match.h"

#include <netinet/icmp6.h>

#include "classifier.h"
#include "dynamic-string.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "openflow/nicira-ext.h"
#include "packets.h"
#include "unaligned.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(nx_match);

/* Rate limit for nx_match parse errors.  These always indicate a bug in the
 * peer and so there's not much point in showing a lot of them. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

enum {
    NXM_INVALID = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_INVALID),
    NXM_BAD_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_TYPE),
    NXM_BAD_VALUE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_VALUE),
    NXM_BAD_MASK = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_MASK),
    NXM_BAD_PREREQ = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_PREREQ),
    NXM_DUP_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_DUP_TYPE),
    BAD_ARGUMENT = OFP_MKERR(OFPET_BAD_ACTION, OFPBAC_BAD_ARGUMENT)
};

/* For each NXM_* field, define NFI_NXM_* as consecutive integers starting
 * from zero. */
enum nxm_field_index {
#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE) \
    NFI_NXM_##HEADER,
#include "nx-match.def"
    N_NXM_FIELDS
};
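
/* For example, the DEFINE_FIELD(OF_IN_PORT, ...) entry in nx-match.def
 * expands here to NFI_NXM_OF_IN_PORT, DEFINE_FIELD(OF_ETH_DST, ...) to
 * NFI_NXM_OF_ETH_DST, and so on, in the order the entries appear in
 * nx-match.def. */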

struct nxm_field {
    struct hmap_node hmap_node;
    enum nxm_field_index index;       /* NFI_* value. */
    uint32_t header;                  /* NXM_* value. */
    flow_wildcards_t wildcard;        /* FWW_* bit, if exactly one. */
    ovs_be16 dl_type[N_NXM_DL_TYPES]; /* dl_type prerequisites. */
    uint8_t nw_proto;                 /* nw_proto prerequisite, if nonzero. */
    const char *name;                 /* "NXM_*" string. */
    bool writable;                    /* Writable with NXAST_REG_{MOVE,LOAD}? */
};

/* All the known fields. */
static struct nxm_field nxm_fields[N_NXM_FIELDS] = {
#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE)       \
    { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \
      DL_CONVERT DL_TYPES, NW_PROTO, "NXM_" #HEADER, WRITABLE },
#define DL_CONVERT(T1, T2) { CONSTANT_HTONS(T1), CONSTANT_HTONS(T2) }
#include "nx-match.def"
};

/* Hash table of 'nxm_fields'. */
static struct hmap all_nxm_fields = HMAP_INITIALIZER(&all_nxm_fields);

/* Possible masks for NXM_OF_ETH_DST_W. */
static const uint8_t eth_all_0s[ETH_ADDR_LEN]
    = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
static const uint8_t eth_all_1s[ETH_ADDR_LEN]
    = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
static const uint8_t eth_mcast_1[ETH_ADDR_LEN]
    = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
static const uint8_t eth_mcast_0[ETH_ADDR_LEN]
    = {0xfe, 0xff, 0xff, 0xff, 0xff, 0xff};

static void
nxm_init(void)
{
    if (hmap_is_empty(&all_nxm_fields)) {
        int i;

        for (i = 0; i < N_NXM_FIELDS; i++) {
            struct nxm_field *f = &nxm_fields[i];
            hmap_insert(&all_nxm_fields, &f->hmap_node,
                        hash_int(f->header, 0));
        }

        /* Verify that the header values are unique (duplicate "case" values
         * cause a compile error). */
        switch (0) {
#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE) \
        case NXM_##HEADER: break;
#include "nx-match.def"
        }
    }
}

static const struct nxm_field *
nxm_field_lookup(uint32_t header)
{
    struct nxm_field *f;

    nxm_init();

    HMAP_FOR_EACH_WITH_HASH (f, hmap_node, hash_int(header, 0),
                             &all_nxm_fields) {
        if (f->header == header) {
            return f;
        }
    }

    return NULL;
}

/* Returns the width of the data for a field with the given 'header', in
 * bytes. */
int
nxm_field_bytes(uint32_t header)
{
    unsigned int length = NXM_LENGTH(header);
    return NXM_HASMASK(header) ? length / 2 : length;
}
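
/* An nxm_header encodes a 16-bit vendor, a 7-bit field number, a 1-bit
 * "hasmask" flag, and an 8-bit payload length.  When "hasmask" is set, the
 * payload holds a value and a mask of equal size, so, for example, a 12-byte
 * NXM_OF_ETH_DST_W payload yields 6 bytes of value (plus 6 bytes of mask). */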

/* Returns the width of the data for a field with the given 'header', in
 * bits. */
int
nxm_field_bits(uint32_t header)
{
    return nxm_field_bytes(header) * 8;
}
\f
/* nx_pull_match() and helpers. */

static int
parse_nx_reg(const struct nxm_field *f,
             struct flow *flow, struct flow_wildcards *wc,
             const void *value, const void *maskp)
{
    int idx = NXM_NX_REG_IDX(f->header);
    if (wc->reg_masks[idx]) {
        return NXM_DUP_TYPE;
    } else {
        flow_wildcards_set_reg_mask(wc, idx,
                                    (NXM_HASMASK(f->header)
                                     ? ntohl(get_unaligned_be32(maskp))
                                     : UINT32_MAX));
        flow->regs[idx] = ntohl(get_unaligned_be32(value));
        flow->regs[idx] &= wc->reg_masks[idx];
        return 0;
    }
}

static int
parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f,
                const void *value, const void *mask)
{
    struct flow_wildcards *wc = &rule->wc;
    struct flow *flow = &rule->flow;

    switch (f->index) {
        /* Metadata. */
    case NFI_NXM_OF_IN_PORT:
        flow->in_port = ntohs(get_unaligned_be16(value));
        return 0;

        /* Ethernet header. */
    case NFI_NXM_OF_ETH_DST:
        if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
            != (FWW_DL_DST | FWW_ETH_MCAST)) {
            return NXM_DUP_TYPE;
        } else {
            wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
            memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
            return 0;
        }
    case NFI_NXM_OF_ETH_DST_W:
        if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
            != (FWW_DL_DST | FWW_ETH_MCAST)) {
            return NXM_DUP_TYPE;
        } else if (eth_addr_equals(mask, eth_mcast_1)) {
            wc->wildcards &= ~FWW_ETH_MCAST;
            flow->dl_dst[0] = *(uint8_t *) value & 0x01;
            return 0;
        } else if (eth_addr_equals(mask, eth_mcast_0)) {
            wc->wildcards &= ~FWW_DL_DST;
            memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
            flow->dl_dst[0] &= 0xfe;
            return 0;
        } else if (eth_addr_equals(mask, eth_all_0s)) {
            return 0;
        } else if (eth_addr_equals(mask, eth_all_1s)) {
            wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
            memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
            return 0;
        } else {
            return NXM_BAD_MASK;
        }
    case NFI_NXM_OF_ETH_SRC:
        memcpy(flow->dl_src, value, ETH_ADDR_LEN);
        return 0;
    case NFI_NXM_OF_ETH_TYPE:
        flow->dl_type =
            ofputil_dl_type_from_openflow(get_unaligned_be16(value));
        return 0;

        /* 802.1Q header. */
    case NFI_NXM_OF_VLAN_TCI:
        if (wc->vlan_tci_mask) {
            return NXM_DUP_TYPE;
        } else {
            cls_rule_set_dl_tci(rule, get_unaligned_be16(value));
            return 0;
        }
    case NFI_NXM_OF_VLAN_TCI_W:
        if (wc->vlan_tci_mask) {
            return NXM_DUP_TYPE;
        } else {
            cls_rule_set_dl_tci_masked(rule, get_unaligned_be16(value),
                                       get_unaligned_be16(mask));
            return 0;
        }

        /* IP header. */
    case NFI_NXM_OF_IP_TOS:
        if (*(uint8_t *) value & 0x03) {
            return NXM_BAD_VALUE;
        } else {
            flow->nw_tos = *(uint8_t *) value;
            return 0;
        }
    case NFI_NXM_OF_IP_PROTO:
        flow->nw_proto = *(uint8_t *) value;
        return 0;

        /* IP addresses in IP and ARP headers. */
    case NFI_NXM_OF_IP_SRC:
    case NFI_NXM_OF_ARP_SPA:
        if (wc->nw_src_mask) {
            return NXM_DUP_TYPE;
        } else {
            cls_rule_set_nw_src(rule, get_unaligned_be32(value));
            return 0;
        }
    case NFI_NXM_OF_IP_SRC_W:
    case NFI_NXM_OF_ARP_SPA_W:
        if (wc->nw_src_mask) {
            return NXM_DUP_TYPE;
        } else {
            ovs_be32 ip = get_unaligned_be32(value);
            ovs_be32 netmask = get_unaligned_be32(mask);
            if (!cls_rule_set_nw_src_masked(rule, ip, netmask)) {
                return NXM_BAD_MASK;
            }
            return 0;
        }
    case NFI_NXM_OF_IP_DST:
    case NFI_NXM_OF_ARP_TPA:
        if (wc->nw_dst_mask) {
            return NXM_DUP_TYPE;
        } else {
            cls_rule_set_nw_dst(rule, get_unaligned_be32(value));
            return 0;
        }
    case NFI_NXM_OF_IP_DST_W:
    case NFI_NXM_OF_ARP_TPA_W:
        if (wc->nw_dst_mask) {
            return NXM_DUP_TYPE;
        } else {
            ovs_be32 ip = get_unaligned_be32(value);
            ovs_be32 netmask = get_unaligned_be32(mask);
            if (!cls_rule_set_nw_dst_masked(rule, ip, netmask)) {
                return NXM_BAD_MASK;
            }
            return 0;
        }

        /* IPv6 addresses. */
    case NFI_NXM_NX_IPV6_SRC:
        if (!ipv6_mask_is_any(&wc->ipv6_src_mask)) {
            return NXM_DUP_TYPE;
        } else {
            struct in6_addr ipv6;
            memcpy(&ipv6, value, sizeof ipv6);
            cls_rule_set_ipv6_src(rule, &ipv6);
            return 0;
        }
    case NFI_NXM_NX_IPV6_SRC_W:
        if (!ipv6_mask_is_any(&wc->ipv6_src_mask)) {
            return NXM_DUP_TYPE;
        } else {
            struct in6_addr ipv6, netmask;
            memcpy(&ipv6, value, sizeof ipv6);
            memcpy(&netmask, mask, sizeof netmask);
            if (!cls_rule_set_ipv6_src_masked(rule, &ipv6, &netmask)) {
                return NXM_BAD_MASK;
            }
            return 0;
        }
    case NFI_NXM_NX_IPV6_DST:
        if (!ipv6_mask_is_any(&wc->ipv6_dst_mask)) {
            return NXM_DUP_TYPE;
        } else {
            struct in6_addr ipv6;
            memcpy(&ipv6, value, sizeof ipv6);
            cls_rule_set_ipv6_dst(rule, &ipv6);
            return 0;
        }
    case NFI_NXM_NX_IPV6_DST_W:
        if (!ipv6_mask_is_any(&wc->ipv6_dst_mask)) {
            return NXM_DUP_TYPE;
        } else {
            struct in6_addr ipv6, netmask;
            memcpy(&ipv6, value, sizeof ipv6);
            memcpy(&netmask, mask, sizeof netmask);
            if (!cls_rule_set_ipv6_dst_masked(rule, &ipv6, &netmask)) {
                return NXM_BAD_MASK;
            }
            return 0;
        }

        /* TCP header. */
    case NFI_NXM_OF_TCP_SRC:
        flow->tp_src = get_unaligned_be16(value);
        return 0;
    case NFI_NXM_OF_TCP_DST:
        flow->tp_dst = get_unaligned_be16(value);
        return 0;

        /* UDP header. */
    case NFI_NXM_OF_UDP_SRC:
        flow->tp_src = get_unaligned_be16(value);
        return 0;
    case NFI_NXM_OF_UDP_DST:
        flow->tp_dst = get_unaligned_be16(value);
        return 0;

        /* ICMP header. */
    case NFI_NXM_OF_ICMP_TYPE:
        flow->tp_src = htons(*(uint8_t *) value);
        return 0;
    case NFI_NXM_OF_ICMP_CODE:
        flow->tp_dst = htons(*(uint8_t *) value);
        return 0;

        /* ICMPv6 header. */
    case NFI_NXM_NX_ICMPV6_TYPE:
        flow->tp_src = htons(*(uint8_t *) value);
        return 0;
    case NFI_NXM_NX_ICMPV6_CODE:
        flow->tp_dst = htons(*(uint8_t *) value);
        return 0;

        /* IPv6 Neighbor Discovery. */
    case NFI_NXM_NX_ND_TARGET:
        /* We've already verified that it's an ICMPv6 message. */
        if ((flow->tp_src != htons(ND_NEIGHBOR_SOLICIT))
            && (flow->tp_src != htons(ND_NEIGHBOR_ADVERT))) {
            return NXM_BAD_PREREQ;
        }
        memcpy(&flow->nd_target, value, sizeof flow->nd_target);
        return 0;
    case NFI_NXM_NX_ND_SLL:
        /* We've already verified that it's an ICMPv6 message. */
        if (flow->tp_src != htons(ND_NEIGHBOR_SOLICIT)) {
            return NXM_BAD_PREREQ;
        }
        memcpy(flow->arp_sha, value, ETH_ADDR_LEN);
        return 0;
    case NFI_NXM_NX_ND_TLL:
        /* We've already verified that it's an ICMPv6 message. */
        if (flow->tp_src != htons(ND_NEIGHBOR_ADVERT)) {
            return NXM_BAD_PREREQ;
        }
        memcpy(flow->arp_tha, value, ETH_ADDR_LEN);
        return 0;

        /* ARP header. */
    case NFI_NXM_OF_ARP_OP:
        if (ntohs(get_unaligned_be16(value)) > 255) {
            return NXM_BAD_VALUE;
        } else {
            flow->nw_proto = ntohs(get_unaligned_be16(value));
            return 0;
        }

    case NFI_NXM_NX_ARP_SHA:
        memcpy(flow->arp_sha, value, ETH_ADDR_LEN);
        return 0;
    case NFI_NXM_NX_ARP_THA:
        memcpy(flow->arp_tha, value, ETH_ADDR_LEN);
        return 0;

        /* Tunnel ID. */
    case NFI_NXM_NX_TUN_ID:
        if (wc->tun_id_mask) {
            return NXM_DUP_TYPE;
        } else {
            cls_rule_set_tun_id(rule, get_unaligned_be64(value));
            return 0;
        }
    case NFI_NXM_NX_TUN_ID_W:
        if (wc->tun_id_mask) {
            return NXM_DUP_TYPE;
        } else {
            ovs_be64 tun_id = get_unaligned_be64(value);
            ovs_be64 tun_mask = get_unaligned_be64(mask);
            cls_rule_set_tun_id_masked(rule, tun_id, tun_mask);
            return 0;
        }

        /* Registers. */
    case NFI_NXM_NX_REG0:
    case NFI_NXM_NX_REG0_W:
#if FLOW_N_REGS >= 2
    case NFI_NXM_NX_REG1:
    case NFI_NXM_NX_REG1_W:
#endif
#if FLOW_N_REGS >= 3
    case NFI_NXM_NX_REG2:
    case NFI_NXM_NX_REG2_W:
#endif
#if FLOW_N_REGS >= 4
    case NFI_NXM_NX_REG3:
    case NFI_NXM_NX_REG3_W:
#endif
#if FLOW_N_REGS > 4
#error
#endif
        return parse_nx_reg(f, flow, wc, value, mask);

    case N_NXM_FIELDS:
        NOT_REACHED();
    }
    NOT_REACHED();
}

static bool
nxm_prereqs_ok(const struct nxm_field *field, const struct flow *flow)
{
    if (field->nw_proto && field->nw_proto != flow->nw_proto) {
        return false;
    }

    if (!field->dl_type[0]) {
        return true;
    } else if (field->dl_type[0] == flow->dl_type) {
        return true;
    } else if (field->dl_type[1] && field->dl_type[1] == flow->dl_type) {
        return true;
    }

    return false;
}

static uint32_t
nx_entry_ok(const void *p, unsigned int match_len)
{
    unsigned int payload_len;
    ovs_be32 header_be;
    uint32_t header;

    if (match_len < 4) {
        if (match_len) {
            VLOG_DBG_RL(&rl, "nx_match ends with partial nxm_header");
        }
        return 0;
    }
    memcpy(&header_be, p, 4);
    header = ntohl(header_be);

    payload_len = NXM_LENGTH(header);
    if (!payload_len) {
        VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
                    "length 0", header);
        return 0;
    }
    if (match_len < payload_len + 4) {
        VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
                    "%u bytes left in nx_match", payload_len + 4, match_len);
        return 0;
    }

    return header;
}
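
/* For example, with 'match_len' of 6 and a well-formed NXM_OF_IN_PORT entry
 * (2-byte payload), nx_entry_ok() returns the entry's 32-bit header; the
 * caller in nx_pull_match() then advances past 4 + 2 bytes, leaving
 * 'match_len' at 0, which ends the loop cleanly. */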

int
nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority,
              struct cls_rule *rule)
{
    uint32_t header;
    uint8_t *p;

    p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
    if (!p) {
        VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
                    "multiple of 8, is longer than space in message (max "
                    "length %zu)", match_len, b->size);
        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
    }

    cls_rule_init_catchall(rule, priority);
    while ((header = nx_entry_ok(p, match_len)) != 0) {
        unsigned length = NXM_LENGTH(header);
        const struct nxm_field *f;
        int error;

        f = nxm_field_lookup(header);
        if (!f) {
            error = NXM_BAD_TYPE;
        } else if (!nxm_prereqs_ok(f, &rule->flow)) {
            error = NXM_BAD_PREREQ;
        } else if (f->wildcard && !(rule->wc.wildcards & f->wildcard)) {
            error = NXM_DUP_TYPE;
        } else {
            /* 'hasmask' and 'length' are known to be correct at this point
             * because they are included in 'header' and nxm_field_lookup()
             * checked them already. */
            rule->wc.wildcards &= ~f->wildcard;
            error = parse_nxm_entry(rule, f, p + 4, p + 4 + length / 2);
        }
        if (error) {
            VLOG_DBG_RL(&rl, "bad nxm_entry with vendor=%"PRIu32", "
                        "field=%"PRIu32", hasmask=%"PRIu32", type=%"PRIu32" "
                        "(error %x)",
                        NXM_VENDOR(header), NXM_FIELD(header),
                        NXM_HASMASK(header), NXM_TYPE(header),
                        error);
            return error;
        }

        p += 4 + length;
        match_len -= 4 + length;
    }

    return match_len ? NXM_INVALID : 0;
}
\f
/* nx_put_match() and helpers.
 *
 * 'put' functions whose names end in 'w' add a wildcarded field.
 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
 * Other 'put' functions add exact-match fields.
 */
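
/* For example, nxm_put_16m(b, NXM_OF_VLAN_TCI, tci, htons(UINT16_MAX))
 * appends the exact-match entry NXM_OF_VLAN_TCI, a mask of htons(0) appends
 * nothing at all, and any other mask appends the wildcarded form (the header
 * produced by NXM_MAKE_WILD_HEADER()) followed by the value and the mask. */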

static void
nxm_put_header(struct ofpbuf *b, uint32_t header)
{
    ovs_be32 n_header = htonl(header);
    ofpbuf_put(b, &n_header, sizeof n_header);
}

static void
nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

static void
nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

static void
nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
    ofpbuf_put(b, &mask, sizeof mask);
}

static void
nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
{
    switch (mask) {
    case 0:
        break;

    case CONSTANT_HTONS(UINT16_MAX):
        nxm_put_16(b, header, value);
        break;

    default:
        nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
        break;
    }
}

static void
nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

static void
nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
    ofpbuf_put(b, &mask, sizeof mask);
}

static void
nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
{
    switch (mask) {
    case 0:
        break;

    case CONSTANT_HTONL(UINT32_MAX):
        nxm_put_32(b, header, value);
        break;

    default:
        nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
        break;
    }
}

static void
nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
}

static void
nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
{
    nxm_put_header(b, header);
    ofpbuf_put(b, &value, sizeof value);
    ofpbuf_put(b, &mask, sizeof mask);
}

static void
nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
{
    switch (mask) {
    case 0:
        break;

    case CONSTANT_HTONLL(UINT64_MAX):
        nxm_put_64(b, header, value);
        break;

    default:
        nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
        break;
    }
}

static void
nxm_put_eth(struct ofpbuf *b, uint32_t header,
            const uint8_t value[ETH_ADDR_LEN])
{
    nxm_put_header(b, header);
    ofpbuf_put(b, value, ETH_ADDR_LEN);
}

static void
nxm_put_eth_dst(struct ofpbuf *b,
                uint32_t wc, const uint8_t value[ETH_ADDR_LEN])
{
    switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) {
    case FWW_DL_DST | FWW_ETH_MCAST:
        break;
    case FWW_DL_DST:
        nxm_put_header(b, NXM_OF_ETH_DST_W);
        ofpbuf_put(b, value, ETH_ADDR_LEN);
        ofpbuf_put(b, eth_mcast_1, ETH_ADDR_LEN);
        break;
    case FWW_ETH_MCAST:
        nxm_put_header(b, NXM_OF_ETH_DST_W);
        ofpbuf_put(b, value, ETH_ADDR_LEN);
        ofpbuf_put(b, eth_mcast_0, ETH_ADDR_LEN);
        break;
    case 0:
        nxm_put_eth(b, NXM_OF_ETH_DST, value);
        break;
    }
}

static void
nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
             const struct in6_addr *value, const struct in6_addr *mask)
{
    if (ipv6_mask_is_any(mask)) {
        return;
    } else if (ipv6_mask_is_exact(mask)) {
        nxm_put_header(b, header);
        ofpbuf_put(b, value, sizeof *value);
    } else {
        nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
        ofpbuf_put(b, value, sizeof *value);
        ofpbuf_put(b, mask, sizeof *mask);
    }
}

/* Appends to 'b' the nx_match format that expresses 'cr' (except for
 * 'cr->priority', because priority is not part of nx_match), plus enough
 * zero bytes to pad the nx_match out to a multiple of 8.
 *
 * This function can cause 'b''s data to be reallocated.
 *
 * Returns the number of bytes appended to 'b', excluding padding.
 *
 * If 'cr' is a catch-all rule that matches every packet, then this function
 * appends nothing to 'b' and returns 0. */
int
nx_put_match(struct ofpbuf *b, const struct cls_rule *cr)
{
    const flow_wildcards_t wc = cr->wc.wildcards;
    const struct flow *flow = &cr->flow;
    const size_t start_len = b->size;
    int match_len;
    int i;

    /* Metadata. */
    if (!(wc & FWW_IN_PORT)) {
        uint16_t in_port = flow->in_port;
        nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port));
    }

    /* Ethernet. */
    nxm_put_eth_dst(b, wc, flow->dl_dst);
    if (!(wc & FWW_DL_SRC)) {
        nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src);
    }
    if (!(wc & FWW_DL_TYPE)) {
        nxm_put_16(b, NXM_OF_ETH_TYPE,
                   ofputil_dl_type_to_openflow(flow->dl_type));
    }

    /* 802.1Q. */
    nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask);

    /* L3. */
    if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) {
        /* IP. */
        if (!(wc & FWW_NW_TOS)) {
            nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc);
        }
        nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask);
        nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask);

        if (!(wc & FWW_NW_PROTO)) {
            nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
            switch (flow->nw_proto) {
                /* TCP. */
            case IPPROTO_TCP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src);
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst);
                }
                break;

                /* UDP. */
            case IPPROTO_UDP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src);
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst);
                }
                break;

                /* ICMP. */
            case IPPROTO_ICMP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_8(b, NXM_OF_ICMP_TYPE, ntohs(flow->tp_src));
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_8(b, NXM_OF_ICMP_CODE, ntohs(flow->tp_dst));
                }
                break;
            }
        }
    } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IPV6)) {
        /* IPv6. */
        if (!(wc & FWW_NW_TOS)) {
            nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc);
        }
        nxm_put_ipv6(b, NXM_NX_IPV6_SRC, &flow->ipv6_src,
                     &cr->wc.ipv6_src_mask);
        nxm_put_ipv6(b, NXM_NX_IPV6_DST, &flow->ipv6_dst,
                     &cr->wc.ipv6_dst_mask);

        if (!(wc & FWW_NW_PROTO)) {
            nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
            switch (flow->nw_proto) {
                /* TCP. */
            case IPPROTO_TCP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src);
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst);
                }
                break;

                /* UDP. */
            case IPPROTO_UDP:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src);
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst);
                }
                break;

                /* ICMPv6. */
            case IPPROTO_ICMPV6:
                if (!(wc & FWW_TP_SRC)) {
                    nxm_put_8(b, NXM_NX_ICMPV6_TYPE, ntohs(flow->tp_src));
                }
                if (!(wc & FWW_TP_DST)) {
                    nxm_put_8(b, NXM_NX_ICMPV6_CODE, ntohs(flow->tp_dst));
                }
                if (!(wc & FWW_ND_TARGET)) {
                    nxm_put_ipv6(b, NXM_NX_ND_TARGET, &flow->nd_target,
                                 &in6addr_exact);
                }
                if (!(wc & FWW_ARP_SHA)) {
                    nxm_put_eth(b, NXM_NX_ND_SLL, flow->arp_sha);
                }
                if (!(wc & FWW_ARP_THA)) {
                    nxm_put_eth(b, NXM_NX_ND_TLL, flow->arp_tha);
                }
                break;
            }
        }
    } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
        /* ARP. */
        if (!(wc & FWW_NW_PROTO)) {
            nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto));
        }
        nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask);
        nxm_put_32m(b, NXM_OF_ARP_TPA, flow->nw_dst, cr->wc.nw_dst_mask);
        if (!(wc & FWW_ARP_SHA)) {
            nxm_put_eth(b, NXM_NX_ARP_SHA, flow->arp_sha);
        }
        if (!(wc & FWW_ARP_THA)) {
            nxm_put_eth(b, NXM_NX_ARP_THA, flow->arp_tha);
        }
    }

    /* Tunnel ID. */
    nxm_put_64m(b, NXM_NX_TUN_ID, flow->tun_id, cr->wc.tun_id_mask);

    /* Registers. */
    for (i = 0; i < FLOW_N_REGS; i++) {
        nxm_put_32m(b, NXM_NX_REG(i),
                    htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i]));
    }

    match_len = b->size - start_len;
    ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
    return match_len;
}
\f
/* nx_match_to_string() and helpers. */

static void format_nxm_field_name(struct ds *, uint32_t header);

char *
nx_match_to_string(const uint8_t *p, unsigned int match_len)
{
    uint32_t header;
    struct ds s;

    if (!match_len) {
        return xstrdup("<any>");
    }

    ds_init(&s);
    while ((header = nx_entry_ok(p, match_len)) != 0) {
        unsigned int length = NXM_LENGTH(header);
        unsigned int value_len = nxm_field_bytes(header);
        const uint8_t *value = p + 4;
        const uint8_t *mask = value + value_len;
        unsigned int i;

        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        format_nxm_field_name(&s, header);
        ds_put_char(&s, '(');

        for (i = 0; i < value_len; i++) {
            ds_put_format(&s, "%02x", value[i]);
        }
        if (NXM_HASMASK(header)) {
            ds_put_char(&s, '/');
            for (i = 0; i < value_len; i++) {
                ds_put_format(&s, "%02x", mask[i]);
            }
        }
        ds_put_char(&s, ')');

        p += 4 + length;
        match_len -= 4 + length;
    }

    if (match_len) {
        if (s.length) {
            ds_put_cstr(&s, ", ");
        }

        ds_put_format(&s, "<%u invalid bytes>", match_len);
    }

    return ds_steal_cstr(&s);
}
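
/* Example output: "NXM_OF_ETH_TYPE(0800), NXM_OF_IP_SRC_W(c0a80100/ffffff00)"
 * describes an nx_match with an exact-match Ethertype entry followed by a
 * masked IPv4 source entry. */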

static void
format_nxm_field_name(struct ds *s, uint32_t header)
{
    const struct nxm_field *f = nxm_field_lookup(header);
    if (f) {
        ds_put_cstr(s, f->name);
    } else {
        ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
    }
}

static uint32_t
parse_nxm_field_name(const char *name, int name_len)
{
    const struct nxm_field *f;

    /* Check whether it's a field name. */
    for (f = nxm_fields; f < &nxm_fields[ARRAY_SIZE(nxm_fields)]; f++) {
        if (!strncmp(f->name, name, name_len) && f->name[name_len] == '\0') {
            return f->header;
        }
    }

    /* Check whether it's a 32-bit field header value as hex.
     * (This isn't ordinarily useful except for testing error behavior.) */
    if (name_len == 8) {
        uint32_t header = hexits_value(name, name_len, NULL);
        if (header != UINT_MAX) {
            return header;
        }
    }

    return 0;
}
\f
/* nx_match_from_string(). */

int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    const char *full_s = s;
    const size_t start_len = b->size;
    int match_len;

    if (!strcmp(s, "<any>")) {
        /* Ensure that 'b->data' isn't actually null. */
        ofpbuf_prealloc_tailroom(b, 1);
        return 0;
    }

    for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
        const char *name;
        uint32_t header;
        int name_len;
        size_t n;

        name = s;
        name_len = strcspn(s, "(");
        if (s[name_len] != '(') {
            ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
        }

        header = parse_nxm_field_name(name, name_len);
        if (!header) {
            ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
        }

        s += name_len + 1;

        nxm_put_header(b, header);
        s = ofpbuf_put_hex(b, s, &n);
        if (n != nxm_field_bytes(header)) {
            ovs_fatal(0, "%.2s: hex digits expected", s);
        }
        if (NXM_HASMASK(header)) {
            s += strspn(s, " ");
            if (*s != '/') {
                ovs_fatal(0, "%s: missing / in masked field %.*s",
                          full_s, name_len, name);
            }
            s = ofpbuf_put_hex(b, s + 1, &n);
            if (n != nxm_field_bytes(header)) {
                ovs_fatal(0, "%.2s: hex digits expected", s);
            }
        }

        s += strspn(s, " ");
        if (*s != ')') {
            ovs_fatal(0, "%s: missing ) following field %.*s",
                      full_s, name_len, name);
        }
        s++;
    }

    match_len = b->size - start_len;
    ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
    return match_len;
}
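
/* nx_match_from_string() accepts the same syntax that nx_match_to_string()
 * produces, e.g. "NXM_OF_VLAN_TCI(1123)" for an exact-match field or
 * "NXM_OF_IP_SRC_W(c0a80100/ffffff00)" for a masked one. */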
\f
const char *
nxm_parse_field_bits(const char *s, uint32_t *headerp, int *ofsp, int *n_bitsp)
{
    const char *full_s = s;
    const char *name;
    uint32_t header;
    int start, end;
    int name_len;
    int width;

    name = s;
    name_len = strcspn(s, "[");
    if (s[name_len] != '[') {
        ovs_fatal(0, "%s: missing [ looking for field name", full_s);
    }

    header = parse_nxm_field_name(name, name_len);
    if (!header) {
        ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
    }
    width = nxm_field_bits(header);

    s += name_len;
    if (sscanf(s, "[%d..%d]", &start, &end) == 2) {
        /* Nothing to do. */
    } else if (sscanf(s, "[%d]", &start) == 1) {
        end = start;
    } else if (!strncmp(s, "[]", 2)) {
        start = 0;
        end = width - 1;
    } else {
        ovs_fatal(0, "%s: syntax error expecting [] or [<bit>] or "
                  "[<start>..<end>]", full_s);
    }
    s = strchr(s, ']') + 1;

    if (start > end) {
        ovs_fatal(0, "%s: starting bit %d is after ending bit %d",
                  full_s, start, end);
    } else if (start >= width) {
        ovs_fatal(0, "%s: starting bit %d is not valid because field is only "
                  "%d bits wide", full_s, start, width);
    } else if (end >= width) {
        ovs_fatal(0, "%s: ending bit %d is not valid because field is only "
                  "%d bits wide", full_s, end, width);
    }

    *headerp = header;
    *ofsp = start;
    *n_bitsp = end - start + 1;

    return s;
}
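
/* For example, "NXM_NX_REG0[0..15]" parses to the NXM_NX_REG0 header with
 * offset 0 and 16 bits, "NXM_NX_REG0[5]" to a single bit at offset 5, and
 * "NXM_NX_REG0[]" to the register's full 32-bit width. */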

void
nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s)
{
    const char *full_s = s;
    uint32_t src, dst;
    int src_ofs, dst_ofs;
    int src_n_bits, dst_n_bits;

    s = nxm_parse_field_bits(s, &src, &src_ofs, &src_n_bits);
    if (strncmp(s, "->", 2)) {
        ovs_fatal(0, "%s: missing `->' following source", full_s);
    }
    s += 2;
    s = nxm_parse_field_bits(s, &dst, &dst_ofs, &dst_n_bits);
    if (*s != '\0') {
        ovs_fatal(0, "%s: trailing garbage following destination", full_s);
    }

    if (src_n_bits != dst_n_bits) {
        ovs_fatal(0, "%s: source field is %d bits wide but destination is "
                  "%d bits wide", full_s, src_n_bits, dst_n_bits);
    }

    move->type = htons(OFPAT_VENDOR);
    move->len = htons(sizeof *move);
    move->vendor = htonl(NX_VENDOR_ID);
    move->subtype = htons(NXAST_REG_MOVE);
    move->n_bits = htons(src_n_bits);
    move->src_ofs = htons(src_ofs);
    move->dst_ofs = htons(dst_ofs);
    move->src = htonl(src);
    move->dst = htonl(dst);
}
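
/* For example, "NXM_NX_REG0[0..5]->NXM_NX_REG1[26..31]" parses into a
 * NXAST_REG_MOVE action that copies the low 6 bits of register 0 into the
 * high 6 bits of register 1. */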

void
nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s)
{
    const char *full_s = s;
    uint32_t dst;
    int ofs, n_bits;
    uint64_t value;

    value = strtoull(s, (char **) &s, 0);
    if (strncmp(s, "->", 2)) {
        ovs_fatal(0, "%s: missing `->' following value", full_s);
    }
    s += 2;
    s = nxm_parse_field_bits(s, &dst, &ofs, &n_bits);
    if (*s != '\0') {
        ovs_fatal(0, "%s: trailing garbage following destination", full_s);
    }

    if (n_bits < 64 && (value >> n_bits) != 0) {
        ovs_fatal(0, "%s: value %"PRIu64" does not fit into %d bits",
                  full_s, value, n_bits);
    }

    load->type = htons(OFPAT_VENDOR);
    load->len = htons(sizeof *load);
    load->vendor = htonl(NX_VENDOR_ID);
    load->subtype = htons(NXAST_REG_LOAD);
    load->ofs_nbits = nxm_encode_ofs_nbits(ofs, n_bits);
    load->dst = htonl(dst);
    load->value = htonll(value);
}
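
/* For example, "0x1f->NXM_NX_REG2[0..4]" parses into a NXAST_REG_LOAD action
 * that sets the low 5 bits of register 2 to all-ones, whereas "0x20" in the
 * same position would be rejected because it does not fit into 5 bits. */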
\f
/* nxm_format_reg_move(), nxm_format_reg_load(). */

void
nxm_format_field_bits(struct ds *s, uint32_t header, int ofs, int n_bits)
{
    format_nxm_field_name(s, header);
    if (ofs == 0 && n_bits == nxm_field_bits(header)) {
        ds_put_cstr(s, "[]");
    } else if (n_bits == 1) {
        ds_put_format(s, "[%d]", ofs);
    } else {
        ds_put_format(s, "[%d..%d]", ofs, ofs + n_bits - 1);
    }
}

void
nxm_format_reg_move(const struct nx_action_reg_move *move, struct ds *s)
{
    int n_bits = ntohs(move->n_bits);
    int src_ofs = ntohs(move->src_ofs);
    int dst_ofs = ntohs(move->dst_ofs);
    uint32_t src = ntohl(move->src);
    uint32_t dst = ntohl(move->dst);

    ds_put_format(s, "move:");
    nxm_format_field_bits(s, src, src_ofs, n_bits);
    ds_put_cstr(s, "->");
    nxm_format_field_bits(s, dst, dst_ofs, n_bits);
}

void
nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s)
{
    int ofs = nxm_decode_ofs(load->ofs_nbits);
    int n_bits = nxm_decode_n_bits(load->ofs_nbits);
    uint32_t dst = ntohl(load->dst);
    uint64_t value = ntohll(load->value);

    ds_put_format(s, "load:%#"PRIx64"->", value);
    nxm_format_field_bits(s, dst, ofs, n_bits);
}
\f
/* nxm_check_reg_move(), nxm_check_reg_load(). */

static bool
field_ok(const struct nxm_field *f, const struct flow *flow, int size)
{
    return (f && !NXM_HASMASK(f->header)
            && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header));
}

int
nxm_check_reg_move(const struct nx_action_reg_move *action,
                   const struct flow *flow)
{
    const struct nxm_field *src;
    const struct nxm_field *dst;

    if (action->n_bits == htons(0)) {
        return BAD_ARGUMENT;
    }

    src = nxm_field_lookup(ntohl(action->src));
    if (!field_ok(src, flow, ntohs(action->src_ofs) + ntohs(action->n_bits))) {
        return BAD_ARGUMENT;
    }

    dst = nxm_field_lookup(ntohl(action->dst));
    if (!field_ok(dst, flow, ntohs(action->dst_ofs) + ntohs(action->n_bits))) {
        return BAD_ARGUMENT;
    }

    if (!dst->writable) {
        return BAD_ARGUMENT;
    }

    return 0;
}

int
nxm_check_reg_load(const struct nx_action_reg_load *action,
                   const struct flow *flow)
{
    const struct nxm_field *dst;
    int ofs, n_bits;

    ofs = nxm_decode_ofs(action->ofs_nbits);
    n_bits = nxm_decode_n_bits(action->ofs_nbits);
    dst = nxm_field_lookup(ntohl(action->dst));
    if (!field_ok(dst, flow, ofs + n_bits)) {
        return BAD_ARGUMENT;
    }

    /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
     * action->value. */
    if (n_bits < 64 && ntohll(action->value) >> n_bits) {
        return BAD_ARGUMENT;
    }

    if (!dst->writable) {
        return BAD_ARGUMENT;
    }

    return 0;
}
\f
/* nxm_execute_reg_move(), nxm_execute_reg_load(). */

static uint64_t
nxm_read_field(const struct nxm_field *src, const struct flow *flow)
{
    switch (src->index) {
    case NFI_NXM_OF_IN_PORT:
        return flow->in_port;

    case NFI_NXM_OF_ETH_DST:
        return eth_addr_to_uint64(flow->dl_dst);

    case NFI_NXM_OF_ETH_SRC:
        return eth_addr_to_uint64(flow->dl_src);

    case NFI_NXM_OF_ETH_TYPE:
        return ntohs(ofputil_dl_type_to_openflow(flow->dl_type));

    case NFI_NXM_OF_VLAN_TCI:
        return ntohs(flow->vlan_tci);

    case NFI_NXM_OF_IP_TOS:
        return flow->nw_tos;

    case NFI_NXM_OF_IP_PROTO:
    case NFI_NXM_OF_ARP_OP:
        return flow->nw_proto;

    case NFI_NXM_OF_IP_SRC:
    case NFI_NXM_OF_ARP_SPA:
        return ntohl(flow->nw_src);

    case NFI_NXM_OF_IP_DST:
    case NFI_NXM_OF_ARP_TPA:
        return ntohl(flow->nw_dst);

    case NFI_NXM_OF_TCP_SRC:
    case NFI_NXM_OF_UDP_SRC:
        return ntohs(flow->tp_src);

    case NFI_NXM_OF_TCP_DST:
    case NFI_NXM_OF_UDP_DST:
        return ntohs(flow->tp_dst);

    case NFI_NXM_OF_ICMP_TYPE:
    case NFI_NXM_NX_ICMPV6_TYPE:
        return ntohs(flow->tp_src) & 0xff;

    case NFI_NXM_OF_ICMP_CODE:
    case NFI_NXM_NX_ICMPV6_CODE:
        return ntohs(flow->tp_dst) & 0xff;

    case NFI_NXM_NX_TUN_ID:
        return ntohll(flow->tun_id);

#define NXM_READ_REGISTER(IDX)                  \
    case NFI_NXM_NX_REG##IDX:                   \
        return flow->regs[IDX];                 \
    case NFI_NXM_NX_REG##IDX##_W:               \
        NOT_REACHED();

    NXM_READ_REGISTER(0);
#if FLOW_N_REGS >= 2
    NXM_READ_REGISTER(1);
#endif
#if FLOW_N_REGS >= 3
    NXM_READ_REGISTER(2);
#endif
#if FLOW_N_REGS >= 4
    NXM_READ_REGISTER(3);
#endif
#if FLOW_N_REGS > 4
#error
#endif

    case NFI_NXM_NX_ARP_SHA:
    case NFI_NXM_NX_ND_SLL:
        return eth_addr_to_uint64(flow->arp_sha);

    case NFI_NXM_NX_ARP_THA:
    case NFI_NXM_NX_ND_TLL:
        return eth_addr_to_uint64(flow->arp_tha);

    case NFI_NXM_NX_TUN_ID_W:
    case NFI_NXM_OF_ETH_DST_W:
    case NFI_NXM_OF_VLAN_TCI_W:
    case NFI_NXM_OF_IP_SRC_W:
    case NFI_NXM_OF_IP_DST_W:
    case NFI_NXM_OF_ARP_SPA_W:
    case NFI_NXM_OF_ARP_TPA_W:
    case NFI_NXM_NX_IPV6_SRC:
    case NFI_NXM_NX_IPV6_SRC_W:
    case NFI_NXM_NX_IPV6_DST:
    case NFI_NXM_NX_IPV6_DST_W:
    case NFI_NXM_NX_ND_TARGET:
    case N_NXM_FIELDS:
        NOT_REACHED();
    }

    NOT_REACHED();
}

static void
nxm_write_field(const struct nxm_field *dst, struct flow *flow,
                uint64_t new_value)
{
    switch (dst->index) {
    case NFI_NXM_OF_VLAN_TCI:
        flow->vlan_tci = htons(new_value);
        break;

    case NFI_NXM_NX_TUN_ID:
        flow->tun_id = htonll(new_value);
        break;

#define NXM_WRITE_REGISTER(IDX)                 \
    case NFI_NXM_NX_REG##IDX:                   \
        flow->regs[IDX] = new_value;            \
        break;                                  \
    case NFI_NXM_NX_REG##IDX##_W:               \
        NOT_REACHED();

    NXM_WRITE_REGISTER(0);
#if FLOW_N_REGS >= 2
    NXM_WRITE_REGISTER(1);
#endif
#if FLOW_N_REGS >= 3
    NXM_WRITE_REGISTER(2);
#endif
#if FLOW_N_REGS >= 4
    NXM_WRITE_REGISTER(3);
#endif
#if FLOW_N_REGS > 4
#error
#endif

    case NFI_NXM_OF_IN_PORT:
    case NFI_NXM_OF_ETH_DST:
    case NFI_NXM_OF_ETH_SRC:
    case NFI_NXM_OF_ETH_TYPE:
    case NFI_NXM_OF_IP_TOS:
    case NFI_NXM_OF_IP_PROTO:
    case NFI_NXM_OF_ARP_OP:
    case NFI_NXM_OF_IP_SRC:
    case NFI_NXM_OF_ARP_SPA:
    case NFI_NXM_OF_IP_DST:
    case NFI_NXM_OF_ARP_TPA:
    case NFI_NXM_OF_TCP_SRC:
    case NFI_NXM_OF_UDP_SRC:
    case NFI_NXM_OF_TCP_DST:
    case NFI_NXM_OF_UDP_DST:
    case NFI_NXM_OF_ICMP_TYPE:
    case NFI_NXM_OF_ICMP_CODE:
    case NFI_NXM_NX_TUN_ID_W:
    case NFI_NXM_OF_ETH_DST_W:
    case NFI_NXM_OF_VLAN_TCI_W:
    case NFI_NXM_OF_IP_SRC_W:
    case NFI_NXM_OF_IP_DST_W:
    case NFI_NXM_OF_ARP_SPA_W:
    case NFI_NXM_OF_ARP_TPA_W:
    case NFI_NXM_NX_ARP_SHA:
    case NFI_NXM_NX_ARP_THA:
    case NFI_NXM_NX_IPV6_SRC:
    case NFI_NXM_NX_IPV6_SRC_W:
    case NFI_NXM_NX_IPV6_DST:
    case NFI_NXM_NX_IPV6_DST_W:
    case NFI_NXM_NX_ICMPV6_TYPE:
    case NFI_NXM_NX_ICMPV6_CODE:
    case NFI_NXM_NX_ND_TARGET:
    case NFI_NXM_NX_ND_SLL:
    case NFI_NXM_NX_ND_TLL:
    case N_NXM_FIELDS:
        NOT_REACHED();
    }
}

void
nxm_execute_reg_move(const struct nx_action_reg_move *action,
                     struct flow *flow)
{
    /* Preparation. */
    int n_bits = ntohs(action->n_bits);
    uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;

    /* Get the interesting bits of the source field. */
    const struct nxm_field *src = nxm_field_lookup(ntohl(action->src));
    int src_ofs = ntohs(action->src_ofs);
    uint64_t src_data = nxm_read_field(src, flow) & (mask << src_ofs);

    /* Get the remaining bits of the destination field. */
    const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst));
    int dst_ofs = ntohs(action->dst_ofs);
    uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);

    /* Get the final value. */
    uint64_t new_data = dst_data | ((src_data >> src_ofs) << dst_ofs);

    nxm_write_field(dst, flow, new_data);
}
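
/* Worked example: moving 4 bits from offset 0 to offset 8 gives n_bits = 4
 * and mask = 0xf.  src_data keeps bits 0..3 of the source, dst_data clears
 * bits 8..11 of the destination (& ~(0xf << 8)), and new_data ORs the source
 * bits back in, shifted up to offset 8. */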

void
nxm_execute_reg_load(const struct nx_action_reg_load *action,
                     struct flow *flow)
{
    /* Preparation. */
    int n_bits = nxm_decode_n_bits(action->ofs_nbits);
    uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;

    /* Get source data. */
    uint64_t src_data = ntohll(action->value);

    /* Get remaining bits of the destination field. */
    const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst));
    int dst_ofs = nxm_decode_ofs(action->ofs_nbits);
    uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);

    /* Get the final value. */
    uint64_t new_data = dst_data | (src_data << dst_ofs);

    nxm_write_field(dst, flow, new_data);
}
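
/* Worked example: loading the value 0x5 into a 4-bit span at offset 4 gives
 * mask = 0xf; dst_data clears bits 4..7 of the destination (& ~(0xf << 4))
 * and new_data ORs in 0x5 << 4, leaving all other bits untouched.
 * nxm_check_reg_load() has already guaranteed that the value fits in
 * 'n_bits', so the shift cannot spill into higher bits. */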