1 /*
2 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include "nx-match.h"
20
21 #include <netinet/icmp6.h>
22
23 #include "classifier.h"
24 #include "dynamic-string.h"
25 #include "meta-flow.h"
26 #include "ofp-actions.h"
27 #include "ofp-errors.h"
28 #include "ofp-util.h"
29 #include "ofpbuf.h"
30 #include "openflow/nicira-ext.h"
31 #include "packets.h"
32 #include "unaligned.h"
33 #include "util.h"
34 #include "vlog.h"
35
36 VLOG_DEFINE_THIS_MODULE(nx_match);
37
38 /* Rate limit for nx_match parse errors. These always indicate a bug in the
39 * peer and so there's not much point in showing a lot of them. */
40 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
41
42 /* Returns the width of the data for a field with the given 'header', in
43 * bytes. */
44 int
45 nxm_field_bytes(uint32_t header)
46 {
47 unsigned int length = NXM_LENGTH(header);
48 return NXM_HASMASK(header) ? length / 2 : length;
49 }
50
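/* Editor's note on the header layout decoded above (as defined in
 * "openflow/nicira-ext.h"): the 32-bit NXM/OXM header packs the vendor (OXM
 * class) in the upper 16 bits, the field number in the next 7 bits, a
 * "hasmask" flag in 1 bit, and the payload length in bytes in the low 8 bits.
 * When "hasmask" is set, the payload carries the value followed by an
 * equal-sized mask, which is why nxm_field_bytes() halves the length for
 * masked entries. */
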
51 /* Returns the width of the data for a field with the given 'header', in
52 * bits. */
53 int
54 nxm_field_bits(uint32_t header)
55 {
56 return nxm_field_bytes(header) * 8;
57 }
58 \f
59 /* nx_pull_match() and helpers. */
60
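/* Checks that 'p', which points to the first of 'match_len' remaining bytes of
 * an nx_match, begins with a complete, well-formed nxm_entry.  Returns the
 * entry's header in host byte order if so, otherwise 0 (logging a rate-limited
 * debug message for anything other than a clean end of the nx_match). */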
61 static uint32_t
62 nx_entry_ok(const void *p, unsigned int match_len)
63 {
64 unsigned int payload_len;
65 ovs_be32 header_be;
66 uint32_t header;
67
68 if (match_len < 4) {
69 if (match_len) {
70 VLOG_DBG_RL(&rl, "nx_match ends with partial (%u-byte) nxm_header",
71 match_len);
72 }
73 return 0;
74 }
75 memcpy(&header_be, p, 4);
76 header = ntohl(header_be);
77
78 payload_len = NXM_LENGTH(header);
79 if (!payload_len) {
80 VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
81 "length 0", header);
82 return 0;
83 }
84 if (match_len < payload_len + 4) {
85 VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
86 "%u bytes left in nx_match", payload_len + 4, match_len);
87 return 0;
88 }
89
90 return header;
91 }
92
93 /* Given the NXM/OXM entry at 'p' for field 'mf', checks for any 1-bit in the
94 * entry's value where there is a 0-bit in its mask.  If it finds one, logs a
95 * rate-limited warning. */
96 static void
97 check_mask_consistency(const uint8_t *p, const struct mf_field *mf)
98 {
99 unsigned int width = mf->n_bytes;
100 const uint8_t *value = p + 4;
101 const uint8_t *mask = p + 4 + width;
102 unsigned int i;
103
104 for (i = 0; i < width; i++) {
105 if (value[i] & ~mask[i]) {
106 if (!VLOG_DROP_WARN(&rl)) {
107 char *s = nx_match_to_string(p, width * 2 + 4);
108 VLOG_WARN_RL(&rl, "NXM/OXM entry %s has 1-bits in value for "
109 "bits wildcarded by the mask. (Future versions "
110 "of OVS may report this as an OpenFlow error.)",
111 s);
free(s);
112 break;
113 }
114 }
115 }
116 }
117
118 static enum ofperr
119 nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
120 struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask)
121 {
122 uint32_t header;
123
124 ovs_assert((cookie != NULL) == (cookie_mask != NULL));
125
126 match_init_catchall(match);
127 if (cookie) {
128 *cookie = *cookie_mask = htonll(0);
129 }
130 if (!match_len) {
131 return 0;
132 }
133
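    /* Walk the nxm_entries one at a time.  nx_entry_ok() yields 0 both at a
     * clean end of the match and on a malformed entry; any leftover bytes are
     * reported as OFPERR_OFPBMC_BAD_LEN after the loop. */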
134 for (;
135 (header = nx_entry_ok(p, match_len)) != 0;
136 p += 4 + NXM_LENGTH(header), match_len -= 4 + NXM_LENGTH(header)) {
137 const struct mf_field *mf;
138 enum ofperr error;
139
140 mf = mf_from_nxm_header(header);
141 if (!mf) {
142 if (strict) {
143 error = OFPERR_OFPBMC_BAD_FIELD;
144 } else {
145 continue;
146 }
147 } else if (!mf_are_prereqs_ok(mf, &match->flow)) {
148 error = OFPERR_OFPBMC_BAD_PREREQ;
149 } else if (!mf_is_all_wild(mf, &match->wc)) {
150 error = OFPERR_OFPBMC_DUP_FIELD;
151 } else {
152 unsigned int width = mf->n_bytes;
153 union mf_value value;
154
155 memcpy(&value, p + 4, width);
156 if (!mf_is_value_valid(mf, &value)) {
157 error = OFPERR_OFPBMC_BAD_VALUE;
158 } else if (!NXM_HASMASK(header)) {
159 error = 0;
160 mf_set_value(mf, &value, match);
161 } else {
162 union mf_value mask;
163
164 memcpy(&mask, p + 4 + width, width);
165 if (!mf_is_mask_valid(mf, &mask)) {
166 error = OFPERR_OFPBMC_BAD_MASK;
167 } else {
168 error = 0;
169 check_mask_consistency(p, mf);
170 mf_set(mf, &value, &mask, match);
171 }
172 }
173 }
174
175 /* Check if the match is for a cookie rather than a classifier rule. */
176 if ((header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W) && cookie) {
177 if (*cookie_mask) {
178 error = OFPERR_OFPBMC_DUP_FIELD;
179 } else {
180 unsigned int width = sizeof *cookie;
181
182 memcpy(cookie, p + 4, width);
183 if (NXM_HASMASK(header)) {
184 memcpy(cookie_mask, p + 4 + width, width);
185 } else {
186 *cookie_mask = OVS_BE64_MAX;
187 }
188 error = 0;
189 }
190 }
191
192 if (error) {
193 VLOG_DBG_RL(&rl, "bad nxm_entry %#08"PRIx32" (vendor=%"PRIu32", "
194 "field=%"PRIu32", hasmask=%"PRIu32", len=%"PRIu32"), "
195 "(%s)", header,
196 NXM_VENDOR(header), NXM_FIELD(header),
197 NXM_HASMASK(header), NXM_LENGTH(header),
198 ofperr_to_string(error));
199 return error;
200 }
201 }
202
203 return match_len ? OFPERR_OFPBMC_BAD_LEN : 0;
204 }
205
206 static enum ofperr
207 nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
208 struct match *match,
209 ovs_be64 *cookie, ovs_be64 *cookie_mask)
210 {
211 uint8_t *p = NULL;
212
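    /* The nx_match in the message is zero-padded out to an 8-byte boundary,
     * so pull the padded length even though only 'match_len' bytes of it are
     * meaningful. */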
213 if (match_len) {
214 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
215 if (!p) {
216 VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
217 "multiple of 8, is longer than space in message (max "
218 "length %zu)", match_len, b->size);
219 return OFPERR_OFPBMC_BAD_LEN;
220 }
221 }
222
223 return nx_pull_raw(p, match_len, strict, match, cookie, cookie_mask);
224 }
225
226 /* Parses the nx_match formatted match description in 'b' with length
227 * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
228 * are valid pointers, then stores the cookie and mask in them if 'b' contains
229 * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
230 *
231 * Fails with an error upon encountering an unknown NXM header.
232 *
233 * Returns 0 if successful, otherwise an OpenFlow error code. */
234 enum ofperr
235 nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match,
236 ovs_be64 *cookie, ovs_be64 *cookie_mask)
237 {
238 return nx_pull_match__(b, match_len, true, match, cookie, cookie_mask);
239 }
240
241 /* Behaves the same as nx_pull_match() except that it skips over unknown NXM
242 * headers instead of failing with an error. */
243 enum ofperr
244 nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
245 struct match *match,
246 ovs_be64 *cookie, ovs_be64 *cookie_mask)
247 {
248 return nx_pull_match__(b, match_len, false, match, cookie, cookie_mask);
249 }
250
251 static enum ofperr
252 oxm_pull_match__(struct ofpbuf *b, bool strict, struct match *match)
253 {
254 struct ofp11_match_header *omh = b->data;
255 uint8_t *p;
256 uint16_t match_len;
257
258 if (b->size < sizeof *omh) {
259 return OFPERR_OFPBMC_BAD_LEN;
260 }
261
262 match_len = ntohs(omh->length);
263 if (match_len < sizeof *omh) {
264 return OFPERR_OFPBMC_BAD_LEN;
265 }
266
267 if (omh->type != htons(OFPMT_OXM)) {
268 return OFPERR_OFPBMC_BAD_TYPE;
269 }
270
271 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
272 if (!p) {
273 VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
274 "multiple of 8, is longer than space in message (max "
275 "length %zu)", match_len, b->size);
276 return OFPERR_OFPBMC_BAD_LEN;
277 }
278
279 return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
280 strict, match, NULL, NULL);
281 }
282
283 /* Parses the oxm formatted match description preceded by a struct
284 * ofp11_match_header in 'b'. Stores the result in 'match'.
285 *
286 * Fails with an error when encountering unknown OXM headers.
287 *
288 * Returns 0 if successful, otherwise an OpenFlow error code. */
289 enum ofperr
290 oxm_pull_match(struct ofpbuf *b, struct match *match)
291 {
292 return oxm_pull_match__(b, true, match);
293 }
294
295 /* Behaves the same as oxm_pull_match() except that it skips over unknown OXM
296 * headers instead of failing with an error when they are encountered. */
297 enum ofperr
298 oxm_pull_match_loose(struct ofpbuf *b, struct match *match)
299 {
300 return oxm_pull_match__(b, false, match);
301 }
302 \f
303 /* nx_put_match() and helpers.
304 *
305 * 'put' functions whose names end in 'w' add a wildcarded field.
306 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
307 * Other 'put' functions add exact-match fields.
308 */
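
/* For example, given a 16-bit field, nxm_put_16m() emits an exact-match entry
 * when the mask is all-ones, a wildcarded *_W entry (value followed by mask)
 * for a partial mask, and nothing at all when the mask is all-zeros. */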
309
310 static void
311 nxm_put_header(struct ofpbuf *b, uint32_t header)
312 {
313 ovs_be32 n_header = htonl(header);
314 ofpbuf_put(b, &n_header, sizeof n_header);
315 }
316
317 static void
318 nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
319 {
320 nxm_put_header(b, header);
321 ofpbuf_put(b, &value, sizeof value);
322 }
323
324 static void
325 nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask)
326 {
327 switch (mask) {
328 case 0:
329 break;
330
331 case UINT8_MAX:
332 nxm_put_8(b, header, value);
333 break;
334
335 default:
336 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
337 ofpbuf_put(b, &value, sizeof value);
338 ofpbuf_put(b, &mask, sizeof mask);
339 }
340 }
341
342 static void
343 nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
344 {
345 nxm_put_header(b, header);
346 ofpbuf_put(b, &value, sizeof value);
347 }
348
349 static void
350 nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
351 {
352 nxm_put_header(b, header);
353 ofpbuf_put(b, &value, sizeof value);
354 ofpbuf_put(b, &mask, sizeof mask);
355 }
356
357 static void
358 nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
359 {
360 switch (mask) {
361 case 0:
362 break;
363
364 case OVS_BE16_MAX:
365 nxm_put_16(b, header, value);
366 break;
367
368 default:
369 nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
370 break;
371 }
372 }
373
374 static void
375 nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
376 {
377 nxm_put_header(b, header);
378 ofpbuf_put(b, &value, sizeof value);
379 }
380
381 static void
382 nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
383 {
384 nxm_put_header(b, header);
385 ofpbuf_put(b, &value, sizeof value);
386 ofpbuf_put(b, &mask, sizeof mask);
387 }
388
389 static void
390 nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
391 {
392 switch (mask) {
393 case 0:
394 break;
395
396 case OVS_BE32_MAX:
397 nxm_put_32(b, header, value);
398 break;
399
400 default:
401 nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
402 break;
403 }
404 }
405
406 static void
407 nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
408 {
409 nxm_put_header(b, header);
410 ofpbuf_put(b, &value, sizeof value);
411 }
412
413 static void
414 nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
415 {
416 nxm_put_header(b, header);
417 ofpbuf_put(b, &value, sizeof value);
418 ofpbuf_put(b, &mask, sizeof mask);
419 }
420
421 static void
422 nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
423 {
424 switch (mask) {
425 case 0:
426 break;
427
428 case OVS_BE64_MAX:
429 nxm_put_64(b, header, value);
430 break;
431
432 default:
433 nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
434 break;
435 }
436 }
437
438 static void
439 nxm_put_eth(struct ofpbuf *b, uint32_t header,
440 const uint8_t value[ETH_ADDR_LEN])
441 {
442 nxm_put_header(b, header);
443 ofpbuf_put(b, value, ETH_ADDR_LEN);
444 }
445
446 static void
447 nxm_put_eth_masked(struct ofpbuf *b, uint32_t header,
448 const uint8_t value[ETH_ADDR_LEN],
449 const uint8_t mask[ETH_ADDR_LEN])
450 {
451 if (!eth_addr_is_zero(mask)) {
452 if (eth_mask_is_exact(mask)) {
453 nxm_put_eth(b, header, value);
454 } else {
455 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
456 ofpbuf_put(b, value, ETH_ADDR_LEN);
457 ofpbuf_put(b, mask, ETH_ADDR_LEN);
458 }
459 }
460 }
461
462 static void
463 nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
464 const struct in6_addr *value, const struct in6_addr *mask)
465 {
466 if (ipv6_mask_is_any(mask)) {
467 return;
468 } else if (ipv6_mask_is_exact(mask)) {
469 nxm_put_header(b, header);
470 ofpbuf_put(b, value, sizeof *value);
471 } else {
472 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
473 ofpbuf_put(b, value, sizeof *value);
474 ofpbuf_put(b, mask, sizeof *mask);
475 }
476 }
477
478 static void
479 nxm_put_frag(struct ofpbuf *b, const struct match *match)
480 {
481 uint8_t nw_frag = match->flow.nw_frag;
482 uint8_t nw_frag_mask = match->wc.masks.nw_frag;
483
484 switch (nw_frag_mask) {
485 case 0:
486 break;
487
488 case FLOW_NW_FRAG_MASK:
489 nxm_put_8(b, NXM_NX_IP_FRAG, nw_frag);
490 break;
491
492 default:
493 nxm_put_8m(b, NXM_NX_IP_FRAG, nw_frag,
494 nw_frag_mask & FLOW_NW_FRAG_MASK);
495 break;
496 }
497 }
498
499 static void
500 nxm_put_ip(struct ofpbuf *b, const struct match *match,
501 uint8_t icmp_proto, uint32_t icmp_type, uint32_t icmp_code,
502 bool oxm)
503 {
504 const struct flow *flow = &match->flow;
505
506 nxm_put_frag(b, match);
507
508 if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
509 if (oxm) {
510 nxm_put_8(b, OXM_OF_IP_DSCP, flow->nw_tos >> 2);
511 } else {
512 nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & IP_DSCP_MASK);
513 }
514 }
515
516 if (match->wc.masks.nw_tos & IP_ECN_MASK) {
517 nxm_put_8(b, oxm ? OXM_OF_IP_ECN : NXM_NX_IP_ECN,
518 flow->nw_tos & IP_ECN_MASK);
519 }
520
521 if (!oxm && match->wc.masks.nw_ttl) {
522 nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl);
523 }
524
525 if (match->wc.masks.nw_proto) {
526 nxm_put_8(b, oxm ? OXM_OF_IP_PROTO : NXM_OF_IP_PROTO, flow->nw_proto);
527
528 if (flow->nw_proto == IPPROTO_TCP) {
529 nxm_put_16m(b, oxm ? OXM_OF_TCP_SRC : NXM_OF_TCP_SRC,
530 flow->tp_src, match->wc.masks.tp_src);
531 nxm_put_16m(b, oxm ? OXM_OF_TCP_DST : NXM_OF_TCP_DST,
532 flow->tp_dst, match->wc.masks.tp_dst);
533 nxm_put_16m(b, NXM_NX_TCP_FLAGS,
534 flow->tcp_flags, match->wc.masks.tcp_flags);
535 } else if (flow->nw_proto == IPPROTO_UDP) {
536 nxm_put_16m(b, oxm ? OXM_OF_UDP_SRC : NXM_OF_UDP_SRC,
537 flow->tp_src, match->wc.masks.tp_src);
538 nxm_put_16m(b, oxm ? OXM_OF_UDP_DST : NXM_OF_UDP_DST,
539 flow->tp_dst, match->wc.masks.tp_dst);
540 } else if (flow->nw_proto == IPPROTO_SCTP) {
541 nxm_put_16m(b, OXM_OF_SCTP_SRC, flow->tp_src,
542 match->wc.masks.tp_src);
543 nxm_put_16m(b, OXM_OF_SCTP_DST, flow->tp_dst,
544 match->wc.masks.tp_dst);
545 } else if (flow->nw_proto == icmp_proto) {
546 if (match->wc.masks.tp_src) {
547 nxm_put_8(b, icmp_type, ntohs(flow->tp_src));
548 }
549 if (match->wc.masks.tp_dst) {
550 nxm_put_8(b, icmp_code, ntohs(flow->tp_dst));
551 }
552 }
553 }
554 }
555
556 /* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and
557 * Flow Stats Request messages, a 'cookie' and 'cookie_mask' may be supplied.
558 * Otherwise, 'cookie_mask' should be zero.
559 *
560 * This function can cause 'b''s data to be reallocated.
561 *
562 * Returns the number of bytes appended to 'b', excluding padding.
563 *
564 * If 'match' is a catch-all rule that matches every packet, then this function
565 * appends nothing to 'b' and returns 0. */
566 static int
567 nx_put_raw(struct ofpbuf *b, bool oxm, const struct match *match,
568 ovs_be64 cookie, ovs_be64 cookie_mask)
569 {
570 const struct flow *flow = &match->flow;
571 const size_t start_len = b->size;
572 int match_len;
573 int i;
574
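    /* Compile-time reminder: this assertion fails whenever FLOW_WC_SEQ is
     * bumped for a new struct flow member, prompting an update of the field
     * serialization below. */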
575 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 22);
576
577 /* Metadata. */
578 if (match->wc.masks.in_port.ofp_port) {
579 ofp_port_t in_port = flow->in_port.ofp_port;
580 if (oxm) {
581 nxm_put_32(b, OXM_OF_IN_PORT, ofputil_port_to_ofp11(in_port));
582 } else {
583 nxm_put_16(b, NXM_OF_IN_PORT, htons(ofp_to_u16(in_port)));
584 }
585 }
586
587 /* Ethernet. */
588 nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_SRC : NXM_OF_ETH_SRC,
589 flow->dl_src, match->wc.masks.dl_src);
590 nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_DST : NXM_OF_ETH_DST,
591 flow->dl_dst, match->wc.masks.dl_dst);
592 nxm_put_16m(b, oxm ? OXM_OF_ETH_TYPE : NXM_OF_ETH_TYPE,
593 ofputil_dl_type_to_openflow(flow->dl_type),
594 match->wc.masks.dl_type);
595
596 /* 802.1Q. */
597 if (oxm) {
598 ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI);
599 ovs_be16 vid = flow->vlan_tci & VID_CFI_MASK;
600 ovs_be16 mask = match->wc.masks.vlan_tci & VID_CFI_MASK;
601
602 if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
603 nxm_put_16(b, OXM_OF_VLAN_VID, vid);
604 } else if (mask) {
605 nxm_put_16m(b, OXM_OF_VLAN_VID, vid, mask);
606 }
607
608 if (vid && vlan_tci_to_pcp(match->wc.masks.vlan_tci)) {
609 nxm_put_8(b, OXM_OF_VLAN_PCP, vlan_tci_to_pcp(flow->vlan_tci));
610 }
611
612 } else {
613 nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci,
614 match->wc.masks.vlan_tci);
615 }
616
617 /* MPLS. */
618 if (eth_type_mpls(flow->dl_type)) {
619 if (match->wc.masks.mpls_lse & htonl(MPLS_TC_MASK)) {
620 nxm_put_8(b, OXM_OF_MPLS_TC, mpls_lse_to_tc(flow->mpls_lse));
621 }
622
623 if (match->wc.masks.mpls_lse & htonl(MPLS_BOS_MASK)) {
624 nxm_put_8(b, OXM_OF_MPLS_BOS, mpls_lse_to_bos(flow->mpls_lse));
625 }
626
627 if (match->wc.masks.mpls_lse & htonl(MPLS_LABEL_MASK)) {
628 nxm_put_32(b, OXM_OF_MPLS_LABEL,
629 htonl(mpls_lse_to_label(flow->mpls_lse)));
630 }
631 }
632
633 /* L3. */
634 if (flow->dl_type == htons(ETH_TYPE_IP)) {
635 /* IP. */
636 nxm_put_32m(b, oxm ? OXM_OF_IPV4_SRC : NXM_OF_IP_SRC,
637 flow->nw_src, match->wc.masks.nw_src);
638 nxm_put_32m(b, oxm ? OXM_OF_IPV4_DST : NXM_OF_IP_DST,
639 flow->nw_dst, match->wc.masks.nw_dst);
640 nxm_put_ip(b, match, IPPROTO_ICMP,
641 oxm ? OXM_OF_ICMPV4_TYPE : NXM_OF_ICMP_TYPE,
642 oxm ? OXM_OF_ICMPV4_CODE : NXM_OF_ICMP_CODE, oxm);
643 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
644 /* IPv6. */
645 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_SRC : NXM_NX_IPV6_SRC,
646 &flow->ipv6_src, &match->wc.masks.ipv6_src);
647 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_DST : NXM_NX_IPV6_DST,
648 &flow->ipv6_dst, &match->wc.masks.ipv6_dst);
649 nxm_put_ip(b, match, IPPROTO_ICMPV6,
650 oxm ? OXM_OF_ICMPV6_TYPE : NXM_NX_ICMPV6_TYPE,
651 oxm ? OXM_OF_ICMPV6_CODE : NXM_NX_ICMPV6_CODE, oxm);
652
653 nxm_put_32m(b, oxm ? OXM_OF_IPV6_FLABEL : NXM_NX_IPV6_LABEL,
654 flow->ipv6_label, match->wc.masks.ipv6_label);
655
656 if (flow->nw_proto == IPPROTO_ICMPV6
657 && (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
658 flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
659 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_ND_TARGET : NXM_NX_ND_TARGET,
660 &flow->nd_target, &match->wc.masks.nd_target);
661 if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
662 nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_SLL : NXM_NX_ND_SLL,
663 flow->arp_sha, match->wc.masks.arp_sha);
664 }
665 if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
666 nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_TLL : NXM_NX_ND_TLL,
667 flow->arp_tha, match->wc.masks.arp_tha);
668 }
669 }
670 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
671 flow->dl_type == htons(ETH_TYPE_RARP)) {
672 /* ARP. */
673 if (match->wc.masks.nw_proto) {
674 nxm_put_16(b, oxm ? OXM_OF_ARP_OP : NXM_OF_ARP_OP,
675 htons(flow->nw_proto));
676 }
677 nxm_put_32m(b, oxm ? OXM_OF_ARP_SPA : NXM_OF_ARP_SPA,
678 flow->nw_src, match->wc.masks.nw_src);
679 nxm_put_32m(b, oxm ? OXM_OF_ARP_TPA : NXM_OF_ARP_TPA,
680 flow->nw_dst, match->wc.masks.nw_dst);
681 nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_SHA : NXM_NX_ARP_SHA,
682 flow->arp_sha, match->wc.masks.arp_sha);
683 nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_THA : NXM_NX_ARP_THA,
684 flow->arp_tha, match->wc.masks.arp_tha);
685 }
686
687 /* Tunnel ID. */
688 nxm_put_64m(b, oxm ? OXM_OF_TUNNEL_ID : NXM_NX_TUN_ID,
689 flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);
690
691 /* Other tunnel metadata. */
692 nxm_put_32m(b, NXM_NX_TUN_IPV4_SRC,
693 flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src);
694 nxm_put_32m(b, NXM_NX_TUN_IPV4_DST,
695 flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst);
696
697 /* Registers. */
698 for (i = 0; i < FLOW_N_REGS; i++) {
699 nxm_put_32m(b, NXM_NX_REG(i),
700 htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
701 }
702
703 /* Mark. */
704 nxm_put_32m(b, NXM_NX_PKT_MARK, htonl(flow->pkt_mark),
705 htonl(match->wc.masks.pkt_mark));
706
707 /* OpenFlow 1.1+ Metadata. */
708 nxm_put_64m(b, OXM_OF_METADATA, flow->metadata, match->wc.masks.metadata);
709
710 /* Cookie. */
711 nxm_put_64m(b, NXM_NX_COOKIE, cookie, cookie_mask);
712
713 match_len = b->size - start_len;
714 return match_len;
715 }
716
717 /* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
718 * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
719 * Stats Request messages, a 'cookie' and 'cookie_mask' may be supplied.
720 * Otherwise, 'cookie_mask' should be zero.
721 *
722 * This function can cause 'b''s data to be reallocated.
723 *
724 * Returns the number of bytes appended to 'b', excluding padding. The return
725 * value can be zero if it appended nothing at all to 'b' (which happens if
726 * 'match' is a catch-all rule that matches every packet). */
727 int
728 nx_put_match(struct ofpbuf *b, const struct match *match,
729 ovs_be64 cookie, ovs_be64 cookie_mask)
730 {
731 int match_len = nx_put_raw(b, false, match, cookie, cookie_mask);
732
733 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
734 return match_len;
735 }
736
737
738 /* Appends to 'b' a struct ofp11_match_header followed by the oxm format that
739 * expresses 'match', plus enough zero bytes to pad the data appended out to a
740 * multiple of 8.
741 *
742 * This function can cause 'b''s data to be reallocated.
743 *
744 * Returns the number of bytes appended to 'b', excluding the padding. Never
745 * returns zero. */
746 int
747 oxm_put_match(struct ofpbuf *b, const struct match *match)
748 {
749 int match_len;
750 struct ofp11_match_header *omh;
751 size_t start_len = b->size;
752 ovs_be64 cookie = htonll(0), cookie_mask = htonll(0);
753
754 ofpbuf_put_uninit(b, sizeof *omh);
755 match_len = nx_put_raw(b, true, match, cookie, cookie_mask) + sizeof *omh;
756 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
757
758 omh = ofpbuf_at(b, start_len, sizeof *omh);
759 omh->type = htons(OFPMT_OXM);
760 omh->length = htons(match_len);
761
762 return match_len;
763 }
764 \f
765 /* nx_match_to_string() and helpers. */
766
767 static void format_nxm_field_name(struct ds *, uint32_t header);
768
769 char *
770 nx_match_to_string(const uint8_t *p, unsigned int match_len)
771 {
772 uint32_t header;
773 struct ds s;
774
775 if (!match_len) {
776 return xstrdup("<any>");
777 }
778
779 ds_init(&s);
780 while ((header = nx_entry_ok(p, match_len)) != 0) {
781 unsigned int length = NXM_LENGTH(header);
782 unsigned int value_len = nxm_field_bytes(header);
783 const uint8_t *value = p + 4;
784 const uint8_t *mask = value + value_len;
785 unsigned int i;
786
787 if (s.length) {
788 ds_put_cstr(&s, ", ");
789 }
790
791 format_nxm_field_name(&s, header);
792 ds_put_char(&s, '(');
793
794 for (i = 0; i < value_len; i++) {
795 ds_put_format(&s, "%02x", value[i]);
796 }
797 if (NXM_HASMASK(header)) {
798 ds_put_char(&s, '/');
799 for (i = 0; i < value_len; i++) {
800 ds_put_format(&s, "%02x", mask[i]);
801 }
802 }
803 ds_put_char(&s, ')');
804
805 p += 4 + length;
806 match_len -= 4 + length;
807 }
808
809 if (match_len) {
810 if (s.length) {
811 ds_put_cstr(&s, ", ");
812 }
813
814 ds_put_format(&s, "<%u invalid bytes>", match_len);
815 }
816
817 return ds_steal_cstr(&s);
818 }
819
820 char *
821 oxm_match_to_string(const struct ofpbuf *p, unsigned int match_len)
822 {
823 const struct ofp11_match_header *omh = p->data;
824 uint16_t match_len_;
825 struct ds s;
826
827 ds_init(&s);
828
829 if (match_len < sizeof *omh) {
830 ds_put_format(&s, "<match too short: %u>", match_len);
831 goto err;
832 }
833
834 if (omh->type != htons(OFPMT_OXM)) {
835 ds_put_format(&s, "<bad match type field: %u>", ntohs(omh->type));
836 goto err;
837 }
838
839 match_len_ = ntohs(omh->length);
840 if (match_len_ < sizeof *omh) {
841 ds_put_format(&s, "<match length field too short: %u>", match_len_);
842 goto err;
843 }
844
845 if (match_len_ != match_len) {
846 ds_put_format(&s, "<match length field incorrect: %u != %u>",
847 match_len_, match_len);
848 goto err;
849 }
850
851 return nx_match_to_string(ofpbuf_at(p, sizeof *omh, 0),
852 match_len - sizeof *omh);
853
854 err:
855 return ds_steal_cstr(&s);
856 }
857
858 static void
859 format_nxm_field_name(struct ds *s, uint32_t header)
860 {
861 const struct mf_field *mf = mf_from_nxm_header(header);
862 if (mf) {
863 ds_put_cstr(s, IS_OXM_HEADER(header) ? mf->oxm_name : mf->nxm_name);
864 if (NXM_HASMASK(header)) {
865 ds_put_cstr(s, "_W");
866 }
867 } else if (header == NXM_NX_COOKIE) {
868 ds_put_cstr(s, "NXM_NX_COOKIE");
869 } else if (header == NXM_NX_COOKIE_W) {
870 ds_put_cstr(s, "NXM_NX_COOKIE_W");
871 } else {
872 ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
873 }
874 }
875
876 static uint32_t
877 parse_nxm_field_name(const char *name, int name_len)
878 {
879 bool wild;
880 int i;
881
882 /* Check whether it's a field name. */
883 wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2);
884 if (wild) {
885 name_len -= 2;
886 }
887
888 for (i = 0; i < MFF_N_IDS; i++) {
889 const struct mf_field *mf = mf_from_id(i);
890 uint32_t header;
891
892 if (mf->nxm_name &&
893 !strncmp(mf->nxm_name, name, name_len) &&
894 mf->nxm_name[name_len] == '\0') {
895 header = mf->nxm_header;
896 } else if (mf->oxm_name &&
897 !strncmp(mf->oxm_name, name, name_len) &&
898 mf->oxm_name[name_len] == '\0') {
899 header = mf->oxm_header;
900 } else {
901 continue;
902 }
903
904 if (!wild) {
905 return header;
906 } else if (mf->maskable != MFM_NONE) {
907 return NXM_MAKE_WILD_HEADER(header);
908 }
909 }
910
911 if (!strncmp("NXM_NX_COOKIE", name, name_len) &&
912 (name_len == strlen("NXM_NX_COOKIE"))) {
913 if (!wild) {
914 return NXM_NX_COOKIE;
915 } else {
916 return NXM_NX_COOKIE_W;
917 }
918 }
919
920 /* Check whether it's a 32-bit field header value as hex.
921 * (This isn't ordinarily useful except for testing error behavior.) */
922 if (name_len == 8) {
923 uint32_t header = hexits_value(name, name_len, NULL);
924 if (header != UINT_MAX) {
925 return header;
926 }
927 }
928
929 return 0;
930 }
931 \f
932 /* nx_match_from_string(). */
933
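/* A sketch of the textual form parsed below, matching what
 * nx_match_to_string() produces, e.g.:
 *
 *     NXM_OF_ETH_TYPE(0800), NXM_OF_IP_PROTO(06)
 *
 * Each entry is a field name followed by the value as hex digits in
 * parentheses; masked fields append "/" and the mask, as in
 * NXM_OF_IP_SRC_W(0a000000/ff000000). */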
934 static int
935 nx_match_from_string_raw(const char *s, struct ofpbuf *b)
936 {
937 const char *full_s = s;
938 const size_t start_len = b->size;
939
940 if (!strcmp(s, "<any>")) {
941 /* Ensure that 'b->data' isn't actually null. */
942 ofpbuf_prealloc_tailroom(b, 1);
943 return 0;
944 }
945
946 for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
947 const char *name;
948 uint32_t header;
949 int name_len;
950 size_t n;
951
952 name = s;
953 name_len = strcspn(s, "(");
954 if (s[name_len] != '(') {
955 ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
956 }
957
958 header = parse_nxm_field_name(name, name_len);
959 if (!header) {
960 ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
961 }
962
963 s += name_len + 1;
964
965 nxm_put_header(b, header);
966 s = ofpbuf_put_hex(b, s, &n);
967 if (n != nxm_field_bytes(header)) {
968 ovs_fatal(0, "%.2s: hex digits expected", s);
969 }
970 if (NXM_HASMASK(header)) {
971 s += strspn(s, " ");
972 if (*s != '/') {
973 ovs_fatal(0, "%s: missing / in masked field %.*s",
974 full_s, name_len, name);
975 }
976 s = ofpbuf_put_hex(b, s + 1, &n);
977 if (n != nxm_field_bytes(header)) {
978 ovs_fatal(0, "%.2s: hex digits expected", s);
979 }
980 }
981
982 s += strspn(s, " ");
983 if (*s != ')') {
984 ovs_fatal(0, "%s: missing ) following field %.*s",
985 full_s, name_len, name);
986 }
987 s++;
988 }
989
990 return b->size - start_len;
991 }
992
993 int
994 nx_match_from_string(const char *s, struct ofpbuf *b)
995 {
996 int match_len = nx_match_from_string_raw(s, b);
997 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
998 return match_len;
999 }
1000
1001 int
1002 oxm_match_from_string(const char *s, struct ofpbuf *b)
1003 {
1004 int match_len;
1005 struct ofp11_match_header *omh;
1006 size_t start_len = b->size;
1007
1008 ofpbuf_put_uninit(b, sizeof *omh);
1009 match_len = nx_match_from_string_raw(s, b) + sizeof *omh;
1010 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
1011
1012 omh = ofpbuf_at(b, start_len, sizeof *omh);
1013 omh->type = htons(OFPMT_OXM);
1014 omh->length = htons(match_len);
1015
1016 return match_len;
1017 }
1018 \f
1019 /* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into
1020 * '*move'.
1021 *
1022 * Returns NULL if successful, otherwise a malloc()'d string describing the
1023 * error. The caller is responsible for freeing the returned string. */
1024 char * WARN_UNUSED_RESULT
1025 nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
1026 {
1027 const char *full_s = s;
1028 char *error;
1029
1030 error = mf_parse_subfield__(&move->src, &s);
1031 if (error) {
1032 return error;
1033 }
1034 if (strncmp(s, "->", 2)) {
1035 return xasprintf("%s: missing `->' following source", full_s);
1036 }
1037 s += 2;
1038 error = mf_parse_subfield(&move->dst, s);
1039 if (error) {
1040 return error;
1041 }
1042
1043 if (move->src.n_bits != move->dst.n_bits) {
1044 return xasprintf("%s: source field is %d bits wide but destination is "
1045 "%d bits wide", full_s,
1046 move->src.n_bits, move->dst.n_bits);
1047 }
1048 return NULL;
1049 }
1050
1051 /* Parses 's' as a "load" action, in the form described in ovs-ofctl(8), into
1052 * '*load'.
1053 *
1054 * Returns NULL if successful, otherwise a malloc()'d string describing the
1055 * error. The caller is responsible for freeing the returned string. */
1056 char * WARN_UNUSED_RESULT
1057 nxm_parse_reg_load(struct ofpact_reg_load *load, const char *s)
1058 {
1059 const char *full_s = s;
1060 uint64_t value = strtoull(s, (char **) &s, 0);
1061 char *error;
1062
1063 if (strncmp(s, "->", 2)) {
1064 return xasprintf("%s: missing `->' following value", full_s);
1065 }
1066 s += 2;
1067 error = mf_parse_subfield(&load->dst, s);
1068 if (error) {
1069 return error;
1070 }
1071
1072 if (load->dst.n_bits < 64 && (value >> load->dst.n_bits) != 0) {
1073 return xasprintf("%s: value %"PRIu64" does not fit into %d bits",
1074 full_s, value, load->dst.n_bits);
1075 }
1076
1077 load->subvalue.be64[0] = htonll(0);
1078 load->subvalue.be64[1] = htonll(value);
1079 return NULL;
1080 }
1081 \f
1082 /* nxm_format_reg_move(), nxm_format_reg_load(). */
1083
1084 void
1085 nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
1086 {
1087 ds_put_format(s, "move:");
1088 mf_format_subfield(&move->src, s);
1089 ds_put_cstr(s, "->");
1090 mf_format_subfield(&move->dst, s);
1091 }
1092
1093 static void
1094 set_field_format(const struct ofpact_reg_load *load, struct ds *s)
1095 {
1096 const struct mf_field *mf = load->dst.field;
1097 union mf_value value;
1098
1099 ovs_assert(load->ofpact.compat == OFPUTIL_OFPAT12_SET_FIELD);
1100 ds_put_format(s, "set_field:");
1101 memset(&value, 0, sizeof value);
1102 bitwise_copy(&load->subvalue, sizeof load->subvalue, 0,
1103 &value, mf->n_bytes, 0, load->dst.n_bits);
1104 mf_format(mf, &value, NULL, s);
1105 ds_put_format(s, "->%s", mf->name);
1106 }
1107
1108 static void
1109 load_format(const struct ofpact_reg_load *load, struct ds *s)
1110 {
1111 ds_put_cstr(s, "load:");
1112 mf_format_subvalue(&load->subvalue, s);
1113 ds_put_cstr(s, "->");
1114 mf_format_subfield(&load->dst, s);
1115 }
1116
1117 void
1118 nxm_format_reg_load(const struct ofpact_reg_load *load, struct ds *s)
1119 {
1120 if (load->ofpact.compat == OFPUTIL_OFPAT12_SET_FIELD) {
1121 set_field_format(load, s);
1122 } else {
1123 load_format(load, s);
1124 }
1125 }
1126 \f
1127 enum ofperr
1128 nxm_reg_move_from_openflow(const struct nx_action_reg_move *narm,
1129 struct ofpbuf *ofpacts)
1130 {
1131 struct ofpact_reg_move *move;
1132
1133 move = ofpact_put_REG_MOVE(ofpacts);
1134 move->src.field = mf_from_nxm_header(ntohl(narm->src));
1135 move->src.ofs = ntohs(narm->src_ofs);
1136 move->src.n_bits = ntohs(narm->n_bits);
1137 move->dst.field = mf_from_nxm_header(ntohl(narm->dst));
1138 move->dst.ofs = ntohs(narm->dst_ofs);
1139 move->dst.n_bits = ntohs(narm->n_bits);
1140
1141 return nxm_reg_move_check(move, NULL);
1142 }
1143
1144 enum ofperr
1145 nxm_reg_load_from_openflow(const struct nx_action_reg_load *narl,
1146 struct ofpbuf *ofpacts)
1147 {
1148 struct ofpact_reg_load *load;
1149
1150 load = ofpact_put_REG_LOAD(ofpacts);
1151 load->dst.field = mf_from_nxm_header(ntohl(narl->dst));
1152 load->dst.ofs = nxm_decode_ofs(narl->ofs_nbits);
1153 load->dst.n_bits = nxm_decode_n_bits(narl->ofs_nbits);
1154 load->subvalue.be64[1] = narl->value;
1155
1156 /* Reject 'narl' if a bit numbered 'n_bits' or higher is set to 1 in
1157 * narl->value. */
1158 if (load->dst.n_bits < 64 &&
1159 ntohll(narl->value) >> load->dst.n_bits) {
1160 return OFPERR_OFPBAC_BAD_ARGUMENT;
1161 }
1162
1163 return nxm_reg_load_check(load, NULL);
1164 }
1165
1166 enum ofperr
1167 nxm_reg_load_from_openflow12_set_field(
1168 const struct ofp12_action_set_field * oasf, struct ofpbuf *ofpacts)
1169 {
1170 uint16_t oasf_len = ntohs(oasf->len);
1171 uint32_t oxm_header = ntohl(oasf->dst);
1172 uint8_t oxm_length = NXM_LENGTH(oxm_header);
1173 struct ofpact_reg_load *load;
1174 const struct mf_field *mf;
1175
1176 /* ofp12_action_set_field is zero-padded to a multiple of 8 bytes. */
1177 if (oasf_len != ROUND_UP(sizeof(*oasf) + oxm_length, 8)) {
1178 return OFPERR_OFPBAC_BAD_SET_LEN;
1179 }
1180 if (!is_all_zeros((const uint8_t *)(oasf) + sizeof *oasf + oxm_length,
1181 oasf_len - oxm_length - sizeof *oasf)) {
1182 return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
1183 }
1184
1185 if (NXM_HASMASK(oxm_header)) {
1186 return OFPERR_OFPBAC_BAD_SET_TYPE;
1187 }
1188 mf = mf_from_nxm_header(oxm_header);
1189 if (!mf) {
1190 return OFPERR_OFPBAC_BAD_SET_TYPE;
1191 }
1192 load = ofpact_put_REG_LOAD(ofpacts);
1193 ofpact_set_field_init(load, mf, oasf + 1);
1194
1195 return nxm_reg_load_check(load, NULL);
1196 }
1197 \f
1198 enum ofperr
1199 nxm_reg_move_check(const struct ofpact_reg_move *move, const struct flow *flow)
1200 {
1201 enum ofperr error;
1202
1203 error = mf_check_src(&move->src, flow);
1204 if (error) {
1205 return error;
1206 }
1207
1208 return mf_check_dst(&move->dst, NULL);
1209 }
1210
1211 enum ofperr
1212 nxm_reg_load_check(const struct ofpact_reg_load *load, const struct flow *flow)
1213 {
1214 return mf_check_dst(&load->dst, flow);
1215 }
1216 \f
1217 void
1218 nxm_reg_move_to_nxast(const struct ofpact_reg_move *move,
1219 struct ofpbuf *openflow)
1220 {
1221 struct nx_action_reg_move *narm;
1222
1223 narm = ofputil_put_NXAST_REG_MOVE(openflow);
1224 narm->n_bits = htons(move->dst.n_bits);
1225 narm->src_ofs = htons(move->src.ofs);
1226 narm->dst_ofs = htons(move->dst.ofs);
1227 narm->src = htonl(move->src.field->nxm_header);
1228 narm->dst = htonl(move->dst.field->nxm_header);
1229 }
1230
1231 static void
1232 reg_load_to_nxast(const struct ofpact_reg_load *load, struct ofpbuf *openflow)
1233 {
1234 struct nx_action_reg_load *narl;
1235
1236 narl = ofputil_put_NXAST_REG_LOAD(openflow);
1237 narl->ofs_nbits = nxm_encode_ofs_nbits(load->dst.ofs, load->dst.n_bits);
1238 narl->dst = htonl(load->dst.field->nxm_header);
1239 narl->value = load->subvalue.be64[1];
1240 }
1241
1242 static void
1243 set_field_to_ofast(const struct ofpact_reg_load *load,
1244 struct ofpbuf *openflow)
1245 {
1246 const struct mf_field *mf = load->dst.field;
1247 uint16_t padded_value_len = ROUND_UP(mf->n_bytes, 8);
1248 struct ofp12_action_set_field *oasf;
1249 char *value;
1250
1251 /* Set field is the only action of variable length (so far),
1252 * so handling the variable-length portion is open-coded here. */
1253 oasf = ofputil_put_OFPAT12_SET_FIELD(openflow);
1254 oasf->dst = htonl(mf->oxm_header);
1255 oasf->len = htons(ntohs(oasf->len) + padded_value_len);
1256
1257 value = ofpbuf_put_zeros(openflow, padded_value_len);
1258 bitwise_copy(&load->subvalue, sizeof load->subvalue, load->dst.ofs,
1259 value, mf->n_bytes, load->dst.ofs, load->dst.n_bits);
1260 }
1261
1262 void
1263 nxm_reg_load_to_nxast(const struct ofpact_reg_load *load,
1264 struct ofpbuf *openflow)
1265 {
1266
1267 if (load->ofpact.compat == OFPUTIL_OFPAT12_SET_FIELD) {
1268 struct ofp_header *oh = (struct ofp_header *)openflow->l2;
1269
1270 switch(oh->version) {
1271 case OFP13_VERSION:
1272 case OFP12_VERSION:
1273 set_field_to_ofast(load, openflow);
1274 break;
1275
1276 case OFP11_VERSION:
1277 case OFP10_VERSION:
1278 if (load->dst.n_bits < 64) {
1279 reg_load_to_nxast(load, openflow);
1280 } else {
1281 /* Split into 64-bit chunks. */
1282 int chunk, ofs;
1283 for (ofs = 0; ofs < load->dst.n_bits; ofs += chunk) {
1284 struct ofpact_reg_load subload = *load;
1285
1286 chunk = MIN(load->dst.n_bits - ofs, 64);
1287
1288 subload.dst.field = load->dst.field;
1289 subload.dst.ofs = load->dst.ofs + ofs;
1290 subload.dst.n_bits = chunk;
1291 bitwise_copy(&load->subvalue, sizeof load->subvalue, ofs,
1292 &subload.subvalue, sizeof subload.subvalue, 0,
1293 chunk);
1294 reg_load_to_nxast(&subload, openflow);
1295 }
1296 }
1297 break;
1298
1299 default:
1300 NOT_REACHED();
1301 }
1302 } else {
1303 reg_load_to_nxast(load, openflow);
1304 }
1305 }
1306 \f
1307 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
1308
1309 void
1310 nxm_execute_reg_move(const struct ofpact_reg_move *move,
1311 struct flow *flow, struct flow_wildcards *wc)
1312 {
1313 union mf_value src_value;
1314 union mf_value dst_value;
1315
1316 mf_mask_field_and_prereqs(move->dst.field, &wc->masks);
1317 mf_mask_field_and_prereqs(move->src.field, &wc->masks);
1318
1319 mf_get_value(move->dst.field, flow, &dst_value);
1320 mf_get_value(move->src.field, flow, &src_value);
1321 bitwise_copy(&src_value, move->src.field->n_bytes, move->src.ofs,
1322 &dst_value, move->dst.field->n_bytes, move->dst.ofs,
1323 move->src.n_bits);
1324 mf_set_flow_value(move->dst.field, &dst_value, flow);
1325 }
1326
1327 void
1328 nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow,
1329 struct flow_wildcards *wc)
1330 {
1331 /* Since at the datapath interface we do not have set actions for
1332 * individual fields, but larger sets of fields for a given protocol
1333 * layer, the set action will in practice only ever apply to exactly
1334 * matched flows for the given protocol layer. For example, if the
1335 * reg_load changes the IP TTL, the corresponding datapath action will
1336 * also rewrite the IP addresses and TOS byte.  Since these other field
1337 * values may not be explicitly set, they depend on the incoming flow field
1338 * values, so all of them are set in the wildcard masks when
1339 * the action is committed to the datapath.  In the rare case where the
1340 * reg_load action does not actually change the value and no other flow
1341 * field values are set (or loaded), the datapath action is skipped and
1342 * no mask bits are set.  Such a datapath flow should, however, be
1343 * dependent on the specific field value, so the corresponding wildcard
1344 * mask bits must be set, lest the datapath flow be applied to packets
1345 * containing some other value in the field and the field value remain
1346 * unchanged regardless of the incoming value.
1347 *
1348 * We set the masks here for the whole fields and their prerequisites.
1349 * Even if only the lower byte of a TCP destination port is set,
1350 * we set the mask for the whole field, and also for ip_proto in the IP
1351 * header, so that the kernel flow is not applied to, e.g., a UDP
1352 * packet or any IP protocol other than TCP.
1353 */
1354 mf_mask_field_and_prereqs(load->dst.field, &wc->masks);
1355 mf_write_subfield_flow(&load->dst, &load->subvalue, flow);
1356 }
1357
1358 void
1359 nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data,
1360 struct flow *flow, struct flow_wildcards *wc)
1361 {
1362 union mf_subvalue src_subvalue;
1363 union mf_subvalue mask_value;
1364 ovs_be64 src_data_be = htonll(src_data);
1365
1366 memset(&mask_value, 0xff, sizeof mask_value);
1367 mf_write_subfield_flow(dst, &mask_value, &wc->masks);
1368
1369 bitwise_copy(&src_data_be, sizeof src_data_be, 0,
1370 &src_subvalue, sizeof src_subvalue, 0,
1371 sizeof src_data_be * 8);
1372 mf_write_subfield_flow(dst, &src_subvalue, flow);
1373 }
1374 \f
1375 /* nxm_parse_stack_action(), which works for both push and pop. */
1376
1377 /* Parses 's' as a "push" or "pop" action, in the form described in
1378 * ovs-ofctl(8), into '*stack_action'.
1379 *
1380 * Returns NULL if successful, otherwise a malloc()'d string describing the
1381 * error. The caller is responsible for freeing the returned string. */
1382 char * WARN_UNUSED_RESULT
1383 nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s)
1384 {
1385 char *error;
1386
1387 error = mf_parse_subfield__(&stack_action->subfield, &s);
1388 if (error) {
1389 return error;
1390 }
1391
1392 if (*s != '\0') {
1393 return xasprintf("%s: trailing garbage following push or pop", s);
1394 }
1395
1396 return NULL;
1397 }
1398
1399 void
1400 nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s)
1401 {
1402 ds_put_cstr(s, "push:");
1403 mf_format_subfield(&push->subfield, s);
1404 }
1405
1406 void
1407 nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s)
1408 {
1409 ds_put_cstr(s, "pop:");
1410 mf_format_subfield(&pop->subfield, s);
1411 }
1412
1413 /* Common setup shared by the push and pop actions. */
1414 static void
1415 stack_action_from_openflow__(const struct nx_action_stack *nasp,
1416 struct ofpact_stack *stack_action)
1417 {
1418 stack_action->subfield.field = mf_from_nxm_header(ntohl(nasp->field));
1419 stack_action->subfield.ofs = ntohs(nasp->offset);
1420 stack_action->subfield.n_bits = ntohs(nasp->n_bits);
1421 }
1422
1423 static void
1424 nxm_stack_to_nxast__(const struct ofpact_stack *stack_action,
1425 struct nx_action_stack *nasp)
1426 {
1427 nasp->offset = htons(stack_action->subfield.ofs);
1428 nasp->n_bits = htons(stack_action->subfield.n_bits);
1429 nasp->field = htonl(stack_action->subfield.field->nxm_header);
1430 }
1431
1432 enum ofperr
1433 nxm_stack_push_from_openflow(const struct nx_action_stack *nasp,
1434 struct ofpbuf *ofpacts)
1435 {
1436 struct ofpact_stack *push;
1437
1438 push = ofpact_put_STACK_PUSH(ofpacts);
1439 stack_action_from_openflow__(nasp, push);
1440
1441 return nxm_stack_push_check(push, NULL);
1442 }
1443
1444 enum ofperr
1445 nxm_stack_pop_from_openflow(const struct nx_action_stack *nasp,
1446 struct ofpbuf *ofpacts)
1447 {
1448 struct ofpact_stack *pop;
1449
1450 pop = ofpact_put_STACK_POP(ofpacts);
1451 stack_action_from_openflow__(nasp, pop);
1452
1453 return nxm_stack_pop_check(pop, NULL);
1454 }
1455
1456 enum ofperr
1457 nxm_stack_push_check(const struct ofpact_stack *push,
1458 const struct flow *flow)
1459 {
1460 return mf_check_src(&push->subfield, flow);
1461 }
1462
1463 enum ofperr
1464 nxm_stack_pop_check(const struct ofpact_stack *pop,
1465 const struct flow *flow)
1466 {
1467 return mf_check_dst(&pop->subfield, flow);
1468 }
1469
1470 void
1471 nxm_stack_push_to_nxast(const struct ofpact_stack *stack,
1472 struct ofpbuf *openflow)
1473 {
1474 nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_PUSH(openflow));
1475 }
1476
1477 void
1478 nxm_stack_pop_to_nxast(const struct ofpact_stack *stack,
1479 struct ofpbuf *openflow)
1480 {
1481 nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_POP(openflow));
1482 }
1483
1484 /* nxm_execute_stack_push(), nxm_execute_stack_pop(). */
1485 static void
1486 nx_stack_push(struct ofpbuf *stack, union mf_subvalue *v)
1487 {
1488 ofpbuf_put(stack, v, sizeof *v);
1489 }
1490
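/* Pops the topmost subvalue from 'stack'.  The returned pointer refers to
 * storage just past the new tail of 'stack', so it stays valid only until the
 * stack is next pushed to or otherwise modified; returns NULL if the stack is
 * empty. */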
1491 static union mf_subvalue *
1492 nx_stack_pop(struct ofpbuf *stack)
1493 {
1494 union mf_subvalue *v = NULL;
1495
1496 if (stack->size) {
1497 stack->size -= sizeof *v;
1498 v = (union mf_subvalue *) ofpbuf_tail(stack);
1499 }
1500
1501 return v;
1502 }
1503
1504 void
1505 nxm_execute_stack_push(const struct ofpact_stack *push,
1506 const struct flow *flow, struct flow_wildcards *wc,
1507 struct ofpbuf *stack)
1508 {
1509 union mf_subvalue mask_value;
1510 union mf_subvalue dst_value;
1511
1512 memset(&mask_value, 0xff, sizeof mask_value);
1513 mf_write_subfield_flow(&push->subfield, &mask_value, &wc->masks);
1514
1515 mf_read_subfield(&push->subfield, flow, &dst_value);
1516 nx_stack_push(stack, &dst_value);
1517 }
1518
1519 void
1520 nxm_execute_stack_pop(const struct ofpact_stack *pop,
1521 struct flow *flow, struct flow_wildcards *wc,
1522 struct ofpbuf *stack)
1523 {
1524 union mf_subvalue *src_value;
1525
1526 src_value = nx_stack_pop(stack);
1527
1528 /* Only pop if the stack is not empty.  Otherwise, log a warning. */
1529 if (src_value) {
1530 union mf_subvalue mask_value;
1531
1532 memset(&mask_value, 0xff, sizeof mask_value);
1533 mf_write_subfield_flow(&pop->subfield, &mask_value, &wc->masks);
1534 mf_write_subfield_flow(&pop->subfield, src_value, flow);
1535 } else {
1536 if (!VLOG_DROP_WARN(&rl)) {
1537 char *flow_str = flow_to_string(flow);
1538 VLOG_WARN_RL(&rl, "Failed to pop from an empty stack. On flow \n"
1539 " %s", flow_str);
1540 free(flow_str);
1541 }
1542 }
1543 }