lib/flow.c
064af421 1/*
4e022ec0 2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
064af421 3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
064af421 7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#include <config.h>
17#include <sys/types.h>
18#include "flow.h"
d31f1109 19#include <errno.h>
064af421 20#include <inttypes.h>
5cb7a798 21#include <limits.h>
064af421 22#include <netinet/in.h>
23#include <netinet/icmp6.h>
24#include <netinet/ip6.h>
5cb7a798 25#include <stdint.h>
26#include <stdlib.h>
27#include <string.h>
10a24935 28#include "byte-order.h"
064af421 29#include "coverage.h"
dc5a7ce7 30#include "csum.h"
31#include "dynamic-string.h"
32#include "hash.h"
c49d1dd1 33#include "jhash.h"
aa6c9932 34#include "match.h"
35#include "ofpbuf.h"
36#include "openflow/openflow.h"
064af421 37#include "packets.h"
94639963 38#include "random.h"
176aaa65 39#include "unaligned.h"
064af421 40
d76f09ea 41COVERAGE_DEFINE(flow_extract);
5cb7a798 42COVERAGE_DEFINE(miniflow_malloc);
d76f09ea 43
44/* U32 indices for segmented flow classification. */
45const uint8_t flow_segment_u32s[4] = {
46 FLOW_SEGMENT_1_ENDS_AT / 4,
47 FLOW_SEGMENT_2_ENDS_AT / 4,
48 FLOW_SEGMENT_3_ENDS_AT / 4,
49 FLOW_U32S
50};
51
52static struct arp_eth_header *
53pull_arp(struct ofpbuf *packet)
54{
55 return ofpbuf_try_pull(packet, ARP_ETH_HEADER_LEN);
56}
57
58static struct ip_header *
59pull_ip(struct ofpbuf *packet)
60{
61 if (packet->size >= IP_HEADER_LEN) {
62 struct ip_header *ip = packet->data;
63 int ip_len = IP_IHL(ip->ip_ihl_ver) * 4;
64 if (ip_len >= IP_HEADER_LEN && packet->size >= ip_len) {
65 return ofpbuf_pull(packet, ip_len);
66 }
67 }
68 return NULL;
69}
70
71static struct tcp_header *
d295e8e9 72pull_tcp(struct ofpbuf *packet)
73{
74 if (packet->size >= TCP_HEADER_LEN) {
75 struct tcp_header *tcp = packet->data;
76 int tcp_len = TCP_OFFSET(tcp->tcp_ctl) * 4;
77 if (tcp_len >= TCP_HEADER_LEN && packet->size >= tcp_len) {
78 return ofpbuf_pull(packet, tcp_len);
79 }
80 }
81 return NULL;
82}
83
84static struct udp_header *
d295e8e9 85pull_udp(struct ofpbuf *packet)
86{
87 return ofpbuf_try_pull(packet, UDP_HEADER_LEN);
88}
89
90static struct sctp_header *
91pull_sctp(struct ofpbuf *packet)
92{
93 return ofpbuf_try_pull(packet, SCTP_HEADER_LEN);
94}
95
064af421 96static struct icmp_header *
d295e8e9 97pull_icmp(struct ofpbuf *packet)
98{
99 return ofpbuf_try_pull(packet, ICMP_HEADER_LEN);
100}
101
102static struct icmp6_hdr *
103pull_icmpv6(struct ofpbuf *packet)
104{
105 return ofpbuf_try_pull(packet, sizeof(struct icmp6_hdr));
106}
107
108static void
109parse_mpls(struct ofpbuf *b, struct flow *flow)
110{
111 struct mpls_hdr *mh;
b0a17866 112 bool top = true;
113
114 while ((mh = ofpbuf_try_pull(b, sizeof *mh))) {
115 if (top) {
116 top = false;
117 flow->mpls_lse = mh->mpls_lse;
118 }
119 if (mh->mpls_lse & htonl(MPLS_BOS_MASK)) {
120 break;
121 }
122 }
123}
124
50f06e16 125static void
ae412e7d 126parse_vlan(struct ofpbuf *b, struct flow *flow)
064af421 127{
50f06e16 128 struct qtag_prefix {
129 ovs_be16 eth_type; /* ETH_TYPE_VLAN */
130 ovs_be16 tci;
131 };
132
0b3e77bb 133 if (b->size >= sizeof(struct qtag_prefix) + sizeof(ovs_be16)) {
50f06e16 134 struct qtag_prefix *qp = ofpbuf_pull(b, sizeof *qp);
66642cb4 135 flow->vlan_tci = qp->tci | htons(VLAN_CFI);
50f06e16 136 }
137}
138
0b3e77bb 139static ovs_be16
50f06e16 140parse_ethertype(struct ofpbuf *b)
064af421 141{
50f06e16 142 struct llc_snap_header *llc;
0b3e77bb 143 ovs_be16 proto;
50f06e16 144
0b3e77bb 145 proto = *(ovs_be16 *) ofpbuf_pull(b, sizeof proto);
36956a7d 146 if (ntohs(proto) >= ETH_TYPE_MIN) {
147 return proto;
148 }
149
150 if (b->size < sizeof *llc) {
36956a7d 151 return htons(FLOW_DL_TYPE_NONE);
152 }
153
154 llc = b->data;
155 if (llc->llc.llc_dsap != LLC_DSAP_SNAP
156 || llc->llc.llc_ssap != LLC_SSAP_SNAP
157 || llc->llc.llc_cntl != LLC_CNTL_SNAP
158 || memcmp(llc->snap.snap_org, SNAP_ORG_ETHERNET,
159 sizeof llc->snap.snap_org)) {
36956a7d 160 return htons(FLOW_DL_TYPE_NONE);
161 }
162
163 ofpbuf_pull(b, sizeof *llc);
164
165 if (ntohs(llc->snap.snap_type) >= ETH_TYPE_MIN) {
166 return llc->snap.snap_type;
167 }
168
169 return htons(FLOW_DL_TYPE_NONE);
170}
171
172static int
173parse_ipv6(struct ofpbuf *packet, struct flow *flow)
174{
4528f34f 175 const struct ovs_16aligned_ip6_hdr *nh;
176 ovs_be32 tc_flow;
177 int nexthdr;
178
179 nh = ofpbuf_try_pull(packet, sizeof *nh);
180 if (!nh) {
181 return EINVAL;
182 }
183
d31f1109 184 nexthdr = nh->ip6_nxt;
d31f1109 185
186 memcpy(&flow->ipv6_src, &nh->ip6_src, sizeof flow->ipv6_src);
187 memcpy(&flow->ipv6_dst, &nh->ip6_dst, sizeof flow->ipv6_dst);
d31f1109 188
4528f34f 189 tc_flow = get_16aligned_be32(&nh->ip6_flow);
25cfd5ca 190 flow->nw_tos = ntohl(tc_flow) >> 20;
fa8223b7 191 flow->ipv6_label = tc_flow & htonl(IPV6_LABEL_MASK);
a61680c6 192 flow->nw_ttl = nh->ip6_hlim;
193 flow->nw_proto = IPPROTO_NONE;
194
195 while (1) {
196 if ((nexthdr != IPPROTO_HOPOPTS)
197 && (nexthdr != IPPROTO_ROUTING)
198 && (nexthdr != IPPROTO_DSTOPTS)
199 && (nexthdr != IPPROTO_AH)
200 && (nexthdr != IPPROTO_FRAGMENT)) {
201 /* It's either a terminal header (e.g., TCP, UDP) or one we
202 * don't understand. In either case, we're done with the
203 * packet, so use it to fill in 'nw_proto'. */
204 break;
205 }
206
207 /* We only verify that at least 8 bytes of the next header are
208 * available, but many of these headers are longer. Ensure that
209 * accesses within the extension header are within those first 8
88366484 210 * bytes. All extension headers are required to be at least 8
d31f1109 211 * bytes. */
212 if (packet->size < 8) {
213 return EINVAL;
214 }
215
216 if ((nexthdr == IPPROTO_HOPOPTS)
217 || (nexthdr == IPPROTO_ROUTING)
218 || (nexthdr == IPPROTO_DSTOPTS)) {
219 /* These headers, while different, have the fields we care about
220 * in the same location and with the same interpretation. */
6b175ad6 221 const struct ip6_ext *ext_hdr = packet->data;
d31f1109 222 nexthdr = ext_hdr->ip6e_nxt;
223 if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 1) * 8)) {
224 return EINVAL;
225 }
226 } else if (nexthdr == IPPROTO_AH) {
227 /* A standard AH definition isn't available, but the fields
228 * we care about are in the same location as the generic
229 * option header--only the header length is calculated
230 * differently. */
6b175ad6 231 const struct ip6_ext *ext_hdr = packet->data;
d31f1109 232 nexthdr = ext_hdr->ip6e_nxt;
233 if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 2) * 4)) {
234 return EINVAL;
235 }
d31f1109 236 } else if (nexthdr == IPPROTO_FRAGMENT) {
4528f34f 237 const struct ovs_16aligned_ip6_frag *frag_hdr = packet->data;
238
239 nexthdr = frag_hdr->ip6f_nxt;
240 if (!ofpbuf_try_pull(packet, sizeof *frag_hdr)) {
241 return EINVAL;
242 }
243
244 /* We only process the first fragment. */
0fd0d083 245 if (frag_hdr->ip6f_offlg != htons(0)) {
246 flow->nw_frag = FLOW_NW_FRAG_ANY;
247 if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) != htons(0)) {
248 flow->nw_frag |= FLOW_NW_FRAG_LATER;
249 nexthdr = IPPROTO_FRAGMENT;
250 break;
251 }
252 }
253 }
254 }
255
d31f1109 256 flow->nw_proto = nexthdr;
88366484 257 return 0;
258}
259
260static void
261parse_tcp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow)
262{
263 const struct tcp_header *tcp = pull_tcp(b);
264 if (tcp) {
265 flow->tp_src = tcp->tcp_src;
266 flow->tp_dst = tcp->tcp_dst;
dc235f7f 267 flow->tcp_flags = tcp->tcp_ctl & htons(0x0fff);
268 packet->l7 = b->data;
269 }
270}
271
272static void
273parse_udp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow)
274{
275 const struct udp_header *udp = pull_udp(b);
276 if (udp) {
277 flow->tp_src = udp->udp_src;
278 flow->tp_dst = udp->udp_dst;
279 packet->l7 = b->data;
280 }
281}
685a51a5 282
283static void
284parse_sctp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow)
285{
286 const struct sctp_header *sctp = pull_sctp(b);
287 if (sctp) {
288 flow->tp_src = sctp->sctp_src;
289 flow->tp_dst = sctp->sctp_dst;
290 packet->l7 = b->data;
291 }
292}
293
685a51a5 294static bool
88366484 295parse_icmpv6(struct ofpbuf *b, struct flow *flow)
296{
297 const struct icmp6_hdr *icmp = pull_icmpv6(b);
298
299 if (!icmp) {
300 return false;
301 }
302
303 /* The ICMPv6 type and code fields use the 16-bit transport port
304 * fields, so we need to store them in 16-bit network byte order. */
305 flow->tp_src = htons(icmp->icmp6_type);
306 flow->tp_dst = htons(icmp->icmp6_code);
685a51a5 307
308 if (icmp->icmp6_code == 0 &&
309 (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
310 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
311 const struct in6_addr *nd_target;
685a51a5 312
313 nd_target = ofpbuf_try_pull(b, sizeof *nd_target);
314 if (!nd_target) {
315 return false;
316 }
88366484 317 flow->nd_target = *nd_target;
685a51a5 318
88366484 319 while (b->size >= 8) {
320 /* The minimum size of an option is 8 bytes, which also is
321 * the size of Ethernet link-layer options. */
322 const struct nd_opt_hdr *nd_opt = b->data;
323 int opt_len = nd_opt->nd_opt_len * 8;
324
325 if (!opt_len || opt_len > b->size) {
326 goto invalid;
327 }
328
329 /* Store the link layer address if the appropriate option is
330 * provided. It is considered an error if the same link
331 * layer option is specified twice. */
332 if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
333 && opt_len == 8) {
334 if (eth_addr_is_zero(flow->arp_sha)) {
88366484 335 memcpy(flow->arp_sha, nd_opt + 1, ETH_ADDR_LEN);
336 } else {
337 goto invalid;
338 }
339 } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
340 && opt_len == 8) {
341 if (eth_addr_is_zero(flow->arp_tha)) {
88366484 342 memcpy(flow->arp_tha, nd_opt + 1, ETH_ADDR_LEN);
343 } else {
344 goto invalid;
345 }
346 }
347
88366484 348 if (!ofpbuf_try_pull(b, opt_len)) {
349 goto invalid;
350 }
351 }
352 }
353
354 return true;
355
356invalid:
357 memset(&flow->nd_target, 0, sizeof(flow->nd_target));
358 memset(flow->arp_sha, 0, sizeof(flow->arp_sha));
359 memset(flow->arp_tha, 0, sizeof(flow->arp_tha));
360
361 return false;
362
363}
364
296e07ac 365/* Initializes 'flow' members from 'packet', 'skb_priority', 'pkt_mark', 'tnl', and
4e022ec0 366 * 'in_port'.
deedf7e7 367 *
0b3e77bb 368 * Initializes 'packet' header pointers as follows:
369 *
370 * - packet->l2 to the start of the Ethernet header.
371 *
372 * - packet->l2_5 to the start of the MPLS shim header.
373 *
374 * - packet->l3 to just past the Ethernet header, or just past the
375 * vlan_header if one is present, to the first byte of the payload of the
376 * Ethernet frame.
377 *
378 * - packet->l4 to just past the IPv4 header, if one is present and has a
379 * correct length, and otherwise NULL.
380 *
c6bcb685 381 * - packet->l7 to just past the TCP/UDP/SCTP/ICMP header, if one is
382 * present and has a correct length, and otherwise NULL.
383 */
7257b535 384void
1362e248 385flow_extract(struct ofpbuf *packet, uint32_t skb_priority, uint32_t pkt_mark,
4e022ec0 386 const struct flow_tnl *tnl, const union flow_in_port *in_port,
296e07ac 387 struct flow *flow)
388{
389 struct ofpbuf b = *packet;
390 struct eth_header *eth;
391
392 COVERAGE_INC(flow_extract);
393
394 memset(flow, 0, sizeof *flow);
395
396 if (tnl) {
cb22974d 397 ovs_assert(tnl != &flow->tunnel);
398 flow->tunnel = *tnl;
399 }
400 if (in_port) {
401 flow->in_port = *in_port;
402 }
deedf7e7 403 flow->skb_priority = skb_priority;
1362e248 404 flow->pkt_mark = pkt_mark;
064af421 405
406 packet->l2 = b.data;
407 packet->l2_5 = NULL;
408 packet->l3 = NULL;
409 packet->l4 = NULL;
410 packet->l7 = NULL;
064af421 411
50f06e16 412 if (b.size < sizeof *eth) {
7257b535 413 return;
50f06e16 414 }
064af421 415
416 /* Link layer. */
417 eth = b.data;
418 memcpy(flow->dl_src, eth->eth_src, ETH_ADDR_LEN);
419 memcpy(flow->dl_dst, eth->eth_dst, ETH_ADDR_LEN);
420
66642cb4 421 /* dl_type, vlan_tci. */
422 ofpbuf_pull(&b, ETH_ADDR_LEN * 2);
423 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
424 parse_vlan(&b, flow);
425 }
426 flow->dl_type = parse_ethertype(&b);
427
428 /* Parse mpls, copy l3 ttl. */
429 if (eth_type_mpls(flow->dl_type)) {
430 packet->l2_5 = b.data;
431 parse_mpls(&b, flow);
432 }
433
ad128cc1 434 /* Network layer. */
435 packet->l3 = b.data;
436 if (flow->dl_type == htons(ETH_TYPE_IP)) {
437 const struct ip_header *nh = pull_ip(&b);
438 if (nh) {
439 packet->l4 = b.data;
440
441 flow->nw_src = get_16aligned_be32(&nh->ip_src);
442 flow->nw_dst = get_16aligned_be32(&nh->ip_dst);
50f06e16 443 flow->nw_proto = nh->ip_proto;
7257b535 444
eadef313 445 flow->nw_tos = nh->ip_tos;
7257b535 446 if (IP_IS_FRAGMENT(nh->ip_frag_off)) {
eadef313 447 flow->nw_frag = FLOW_NW_FRAG_ANY;
7257b535 448 if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
eadef313 449 flow->nw_frag |= FLOW_NW_FRAG_LATER;
450 }
451 }
a61680c6 452 flow->nw_ttl = nh->ip_ttl;
453
454 if (!(nh->ip_frag_off & htons(IP_FRAG_OFF_MASK))) {
6767a2cc 455 if (flow->nw_proto == IPPROTO_TCP) {
88366484 456 parse_tcp(packet, &b, flow);
6767a2cc 457 } else if (flow->nw_proto == IPPROTO_UDP) {
88366484 458 parse_udp(packet, &b, flow);
459 } else if (flow->nw_proto == IPPROTO_SCTP) {
460 parse_sctp(packet, &b, flow);
6767a2cc 461 } else if (flow->nw_proto == IPPROTO_ICMP) {
462 const struct icmp_header *icmp = pull_icmp(&b);
463 if (icmp) {
464 flow->tp_src = htons(icmp->icmp_type);
465 flow->tp_dst = htons(icmp->icmp_code);
50f06e16 466 packet->l7 = b.data;
064af421 467 }
064af421 468 }
469 }
470 }
0c8dcab8 471 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
472 if (parse_ipv6(&b, flow)) {
473 return;
474 }
475
476 packet->l4 = b.data;
477 if (flow->nw_proto == IPPROTO_TCP) {
478 parse_tcp(packet, &b, flow);
479 } else if (flow->nw_proto == IPPROTO_UDP) {
480 parse_udp(packet, &b, flow);
481 } else if (flow->nw_proto == IPPROTO_SCTP) {
482 parse_sctp(packet, &b, flow);
483 } else if (flow->nw_proto == IPPROTO_ICMPV6) {
484 if (parse_icmpv6(&b, flow)) {
485 packet->l7 = b.data;
486 }
487 }
488 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
489 flow->dl_type == htons(ETH_TYPE_RARP)) {
490 const struct arp_eth_header *arp = pull_arp(&b);
491 if (arp && arp->ar_hrd == htons(1)
d295e8e9 492 && arp->ar_pro == htons(ETH_TYPE_IP)
493 && arp->ar_hln == ETH_ADDR_LEN
494 && arp->ar_pln == 4) {
495 /* We only match on the lower 8 bits of the opcode. */
496 if (ntohs(arp->ar_op) <= 0xff) {
497 flow->nw_proto = ntohs(arp->ar_op);
064af421 498 }
a26ef517 499
500 flow->nw_src = get_16aligned_be32(&arp->ar_spa);
501 flow->nw_dst = get_16aligned_be32(&arp->ar_tpa);
502 memcpy(flow->arp_sha, arp->ar_sha, ETH_ADDR_LEN);
503 memcpy(flow->arp_tha, arp->ar_tha, ETH_ADDR_LEN);
504 }
505 }
506}
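/* Example usage (illustrative sketch; 'port' here is a hypothetical value
 * chosen by the caller):
 *
 *     union flow_in_port in_port;
 *     struct flow flow;
 *
 *     in_port.ofp_port = port;
 *     flow_extract(packet, 0, 0, NULL, &in_port, &flow);
 *
 * Passing NULL for 'tnl' leaves 'flow->tunnel' all zeros; the two zeros are
 * the skb_priority and pkt_mark values. */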
507
508/* For every bit of a field that is wildcarded in 'wildcards', sets the
509 * corresponding bit in 'flow' to zero. */
510void
511flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
512{
513 uint32_t *flow_u32 = (uint32_t *) flow;
514 const uint32_t *wc_u32 = (const uint32_t *) &wildcards->masks;
515 size_t i;
993410fb 516
517 for (i = 0; i < FLOW_U32S; i++) {
518 flow_u32[i] &= wc_u32[i];
26720e24 519 }
520}
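/* For example, if 'wildcards->masks.nw_src' is htonl(0xffffff00) (a /24
 * mask), the low 8 bits of 'flow->nw_src' are zeroed and the rest are kept. */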
521
522void
523flow_unwildcard_tp_ports(const struct flow *flow, struct flow_wildcards *wc)
524{
525 if (flow->nw_proto != IPPROTO_ICMP) {
526 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
527 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
528 } else {
529 wc->masks.tp_src = htons(0xff);
530 wc->masks.tp_dst = htons(0xff);
531 }
532}
533
534/* Initializes 'fmd' with the metadata found in 'flow'. */
535void
536flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
537{
476f36e8 538 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 23);
e9358af6 539
296e07ac 540 fmd->tun_id = flow->tunnel.tun_id;
541 fmd->tun_src = flow->tunnel.ip_src;
542 fmd->tun_dst = flow->tunnel.ip_dst;
969fc56c 543 fmd->metadata = flow->metadata;
5d6c3af0 544 memcpy(fmd->regs, flow->regs, sizeof fmd->regs);
ac923e91 545 fmd->pkt_mark = flow->pkt_mark;
4e022ec0 546 fmd->in_port = flow->in_port.ofp_port;
547}
548
064af421 549char *
ae412e7d 550flow_to_string(const struct flow *flow)
551{
552 struct ds ds = DS_EMPTY_INITIALIZER;
553 flow_format(&ds, flow);
554 return ds_cstr(&ds);
555}
556
557const char *
558flow_tun_flag_to_string(uint32_t flags)
559{
560 switch (flags) {
561 case FLOW_TNL_F_DONT_FRAGMENT:
562 return "df";
563 case FLOW_TNL_F_CSUM:
564 return "csum";
565 case FLOW_TNL_F_KEY:
566 return "key";
567 default:
568 return NULL;
569 }
570}
571
572void
573format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
574 uint32_t flags, char del)
575{
576 uint32_t bad = 0;
577
578 if (!flags) {
579 return;
580 }
581 while (flags) {
582 uint32_t bit = rightmost_1bit(flags);
583 const char *s;
584
585 s = bit_to_string(bit);
586 if (s) {
587 ds_put_format(ds, "%s%c", s, del);
588 } else {
589 bad |= bit;
590 }
591
592 flags &= ~bit;
593 }
594
595 if (bad) {
596 ds_put_format(ds, "0x%"PRIx32"%c", bad, del);
597 }
598 ds_chomp(ds, del);
599}
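/* Example (illustrative): the tunnel flags defined above can be printed with
 *
 *     format_flags(&ds, flow_tun_flag_to_string, tnl_flags, ',');
 *
 * where 'tnl_flags' is a hypothetical FLOW_TNL_F_* bitmap; any bits that
 * bit_to_string() does not recognize are emitted once as a hex value. */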
600
601void
602format_flags_masked(struct ds *ds, const char *name,
603 const char *(*bit_to_string)(uint32_t), uint32_t flags,
604 uint32_t mask)
605{
606 if (name) {
607 ds_put_format(ds, "%s=", name);
608 }
609 while (mask) {
610 uint32_t bit = rightmost_1bit(mask);
611 const char *s = bit_to_string(bit);
612
613 ds_put_format(ds, "%s%s", (flags & bit) ? "+" : "-",
614 s ? s : "[Unknown]");
615 mask &= ~bit;
616 }
617}
618
064af421 619void
ae412e7d 620flow_format(struct ds *ds, const struct flow *flow)
064af421 621{
aa6c9932 622 struct match match;
296e07ac 623
aa6c9932 624 match_wc_init(&match, flow);
3f78c3cc 625 match_format(&match, ds, OFP_DEFAULT_PRIORITY);
626}
627
628void
ae412e7d 629flow_print(FILE *stream, const struct flow *flow)
630{
631 char *s = flow_to_string(flow);
632 fputs(s, stream);
633 free(s);
634}
635\f
636/* flow_wildcards functions. */
637
d8ae4d67 638/* Initializes 'wc' as a set of wildcards that matches every packet. */
54363004 639void
d8ae4d67 640flow_wildcards_init_catchall(struct flow_wildcards *wc)
54363004 641{
659c2346 642 memset(&wc->masks, 0, sizeof wc->masks);
643}
644
645/* Clear the metadata and register wildcard masks. They are not packet
646 * header fields. */
647void
648flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
649{
650 memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
651 memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
652}
653
654/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
655 * fields. */
656bool
657flow_wildcards_is_catchall(const struct flow_wildcards *wc)
658{
659 const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
660 size_t i;
ecf1e7ac 661
662 for (i = 0; i < FLOW_U32S; i++) {
663 if (wc_u32[i]) {
664 return false;
665 }
666 }
667 return true;
668}
669
670/* Sets 'dst' as the bitwise AND of wildcards in 'src1' and 'src2'.
671 * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded
672 * in 'src1' or 'src2' or both. */
b5d97350 673void
674flow_wildcards_and(struct flow_wildcards *dst,
675 const struct flow_wildcards *src1,
676 const struct flow_wildcards *src2)
b5d97350 677{
678 uint32_t *dst_u32 = (uint32_t *) &dst->masks;
679 const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
680 const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;
681 size_t i;
a79c50f3 682
683 for (i = 0; i < FLOW_U32S; i++) {
684 dst_u32[i] = src1_u32[i] & src2_u32[i];
26720e24 685 }
686}
687
688/* Sets 'dst' as the bitwise OR of wildcards in 'src1' and 'src2'. That
 689 * is, a bit or a field is wildcarded in 'dst' only if it is wildcarded
 690 * in both 'src1' and 'src2'. */
691void
692flow_wildcards_or(struct flow_wildcards *dst,
693 const struct flow_wildcards *src1,
694 const struct flow_wildcards *src2)
695{
696 uint32_t *dst_u32 = (uint32_t *) &dst->masks;
697 const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
698 const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;
699 size_t i;
700
701 for (i = 0; i < FLOW_U32S; i++) {
702 dst_u32[i] = src1_u32[i] | src2_u32[i];
703 }
704}
705
706/* Perform a bitwise OR of miniflow 'src' flow data with the equivalent
707 * fields in 'dst', storing the result in 'dst'. */
708static void
709flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
710{
711 uint32_t *dst_u32 = (uint32_t *) dst;
476f36e8 712 const uint32_t *p = src->values;
080e28d0 713 uint64_t map;
ad77e3c5 714
080e28d0 715 for (map = src->map; map; map = zero_rightmost_1bit(map)) {
476f36e8 716 dst_u32[raw_ctz(map)] |= *p++;
717 }
718}
719
720/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
721void
722flow_wildcards_fold_minimask(struct flow_wildcards *wc,
723 const struct minimask *mask)
724{
725 flow_union_with_miniflow(&wc->masks, &mask->masks);
726}
727
83916319 728uint64_t
476f36e8 729miniflow_get_map_in_range(const struct miniflow *miniflow,
83916319 730 uint8_t start, uint8_t end, unsigned int *offset)
731{
732 uint64_t map = miniflow->map;
83916319 733 *offset = 0;
734
735 if (start > 0) {
736 uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
83916319 737 *offset = count_1bits(map & msk);
738 map &= ~msk;
739 }
740 if (end < FLOW_U32S) {
741 uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
742 map &= msk;
743 }
744 return map;
745}
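/* Worked example: with 'miniflow->map' == 0x2d (binary 101101), 'start' == 2
 * and 'end' == 5, the low mask (1 << 2) - 1 == 0x3 covers one set bit, so
 * '*offset' becomes 1, and the value returned is
 * (0x2d & ~0x3) & ((1 << 5) - 1) == 0xc, i.e. only the bits for u32 offsets
 * 2 and 3 remain. */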
746
747/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
748 * in range [start, end). */
749void
750flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
751 const struct minimask *mask,
752 uint8_t start, uint8_t end)
753{
754 uint32_t *dst_u32 = (uint32_t *)&wc->masks;
755 unsigned int offset;
756 uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
757 &offset);
758 const uint32_t *p = mask->masks.values + offset;
759
760 for (; map; map = zero_rightmost_1bit(map)) {
761 dst_u32[raw_ctz(map)] |= *p++;
762 }
763}
764
765/* Returns a hash of the wildcards in 'wc'. */
766uint32_t
1006cda6 767flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
b5d97350 768{
ac31c5af 769 return flow_hash(&wc->masks, basis);
770}
771
772/* Returns true if 'a' and 'b' represent the same wildcards, false if they are
773 * different. */
774bool
775flow_wildcards_equal(const struct flow_wildcards *a,
776 const struct flow_wildcards *b)
777{
659c2346 778 return flow_equal(&a->masks, &b->masks);
779}
780
781/* Returns true if at least one bit or field is wildcarded in 'a' but not in
782 * 'b', false otherwise. */
783bool
784flow_wildcards_has_extra(const struct flow_wildcards *a,
785 const struct flow_wildcards *b)
786{
787 const uint32_t *a_u32 = (const uint32_t *) &a->masks;
788 const uint32_t *b_u32 = (const uint32_t *) &b->masks;
789 size_t i;
a79c50f3 790
791 for (i = 0; i < FLOW_U32S; i++) {
792 if ((a_u32[i] & b_u32[i]) != b_u32[i]) {
793 return true;
794 }
795 }
796 return false;
797}
b6c9e612 798
799/* Returns true if 'a' and 'b' are equal, except that 0-bits (wildcarded bits)
800 * in 'wc' do not need to be equal in 'a' and 'b'. */
801bool
802flow_equal_except(const struct flow *a, const struct flow *b,
803 const struct flow_wildcards *wc)
804{
805 const uint32_t *a_u32 = (const uint32_t *) a;
806 const uint32_t *b_u32 = (const uint32_t *) b;
807 const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
808 size_t i;
d31f1109 809
810 for (i = 0; i < FLOW_U32S; i++) {
811 if ((a_u32[i] ^ b_u32[i]) & wc_u32[i]) {
812 return false;
813 }
47284b1f 814 }
659c2346 815 return true;
816}
817
818/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
819 * (A 0-bit indicates a wildcard bit.) */
820void
821flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
822{
26720e24 823 wc->masks.regs[idx] = mask;
b6c9e612 824}
825
826/* Hashes 'flow' based on its L2 through L4 protocol information. */
827uint32_t
828flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
829{
830 struct {
831 union {
832 ovs_be32 ipv4_addr;
833 struct in6_addr ipv6_addr;
834 };
835 ovs_be16 eth_type;
836 ovs_be16 vlan_tci;
5b909cbb 837 ovs_be16 tp_port;
838 uint8_t eth_addr[ETH_ADDR_LEN];
839 uint8_t ip_proto;
840 } fields;
841
842 int i;
843
844 memset(&fields, 0, sizeof fields);
845 for (i = 0; i < ETH_ADDR_LEN; i++) {
846 fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i];
847 }
848 fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
849 fields.eth_type = flow->dl_type;
850
851 /* UDP source and destination port are not taken into account because they
852 * will not necessarily be symmetric in a bidirectional flow. */
ff55ea1f 853 if (fields.eth_type == htons(ETH_TYPE_IP)) {
854 fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
855 fields.ip_proto = flow->nw_proto;
c6bcb685 856 if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
5b909cbb 857 fields.tp_port = flow->tp_src ^ flow->tp_dst;
858 }
859 } else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
860 const uint8_t *a = &flow->ipv6_src.s6_addr[0];
861 const uint8_t *b = &flow->ipv6_dst.s6_addr[0];
862 uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0];
863
864 for (i=0; i<16; i++) {
865 ipv6_addr[i] = a[i] ^ b[i];
866 }
ff55ea1f 867 fields.ip_proto = flow->nw_proto;
c6bcb685 868 if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
5b909cbb 869 fields.tp_port = flow->tp_src ^ flow->tp_dst;
ff55ea1f 870 }
ff55ea1f 871 }
c49d1dd1 872 return jhash_bytes(&fields, sizeof fields, basis);
ff55ea1f 873}
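/* Note: because the addresses and ports above are folded in as src ^ dst
 * XORs, swapping the two directions of a connection produces the same
 * 'fields' and therefore the same hash, which is what makes this usable as a
 * symmetric L3/L4 hash. */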
520e9a2a 874
875/* Initialize a flow with random fields that matter for nx_hash_fields. */
876void
877flow_random_hash_fields(struct flow *flow)
878{
879 uint16_t rnd = random_uint16();
880
881 /* Initialize to all zeros. */
882 memset(flow, 0, sizeof *flow);
883
884 eth_addr_random(flow->dl_src);
885 eth_addr_random(flow->dl_dst);
886
887 flow->vlan_tci = (OVS_FORCE ovs_be16) (random_uint16() & VLAN_VID_MASK);
888
889 /* Make most of the random flows IPv4, some IPv6, and rest random. */
890 flow->dl_type = rnd < 0x8000 ? htons(ETH_TYPE_IP) :
891 rnd < 0xc000 ? htons(ETH_TYPE_IPV6) : (OVS_FORCE ovs_be16)rnd;
892
893 if (dl_type_is_ip_any(flow->dl_type)) {
894 if (flow->dl_type == htons(ETH_TYPE_IP)) {
895 flow->nw_src = (OVS_FORCE ovs_be32)random_uint32();
896 flow->nw_dst = (OVS_FORCE ovs_be32)random_uint32();
897 } else {
898 random_bytes(&flow->ipv6_src, sizeof flow->ipv6_src);
899 random_bytes(&flow->ipv6_dst, sizeof flow->ipv6_dst);
900 }
901 /* Make most of IP flows TCP, some UDP or SCTP, and rest random. */
902 rnd = random_uint16();
903 flow->nw_proto = rnd < 0x8000 ? IPPROTO_TCP :
904 rnd < 0xc000 ? IPPROTO_UDP :
905 rnd < 0xd000 ? IPPROTO_SCTP : (uint8_t)rnd;
906 if (flow->nw_proto == IPPROTO_TCP ||
907 flow->nw_proto == IPPROTO_UDP ||
908 flow->nw_proto == IPPROTO_SCTP) {
909 flow->tp_src = (OVS_FORCE ovs_be16)random_uint16();
910 flow->tp_dst = (OVS_FORCE ovs_be16)random_uint16();
911 }
912 }
913}
914
915/* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
916void
917flow_mask_hash_fields(const struct flow *flow, struct flow_wildcards *wc,
918 enum nx_hash_fields fields)
919{
920 switch (fields) {
921 case NX_HASH_FIELDS_ETH_SRC:
922 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
923 break;
924
925 case NX_HASH_FIELDS_SYMMETRIC_L4:
926 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
927 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
928 if (flow->dl_type == htons(ETH_TYPE_IP)) {
929 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
930 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
7f8a65ca 931 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
932 memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
933 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
934 }
935 if (is_ip_any(flow)) {
936 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
d8d9c698 937 flow_unwildcard_tp_ports(flow, wc);
6cdd5145 938 }
1dd35f8a 939 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
940 break;
941
942 default:
428b2edd 943 OVS_NOT_REACHED();
944 }
945}
946
947/* Hashes the portions of 'flow' designated by 'fields'. */
948uint32_t
949flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,
950 uint16_t basis)
951{
952 switch (fields) {
953
954 case NX_HASH_FIELDS_ETH_SRC:
c49d1dd1 955 return jhash_bytes(flow->dl_src, sizeof flow->dl_src, basis);
956
957 case NX_HASH_FIELDS_SYMMETRIC_L4:
958 return flow_hash_symmetric_l4(flow, basis);
959 }
960
428b2edd 961 OVS_NOT_REACHED();
962}
963
964/* Returns a string representation of 'fields'. */
965const char *
966flow_hash_fields_to_str(enum nx_hash_fields fields)
967{
968 switch (fields) {
969 case NX_HASH_FIELDS_ETH_SRC: return "eth_src";
970 case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4";
971 default: return "<unknown>";
972 }
973}
974
975/* Returns true if the value of 'fields' is supported. Otherwise false. */
976bool
977flow_hash_fields_valid(enum nx_hash_fields fields)
978{
979 return fields == NX_HASH_FIELDS_ETH_SRC
980 || fields == NX_HASH_FIELDS_SYMMETRIC_L4;
981}
8b3b8dd1 982
983/* Returns a hash value for the bits of 'flow' that are active based on
984 * 'wc', given 'basis'. */
985uint32_t
986flow_hash_in_wildcards(const struct flow *flow,
987 const struct flow_wildcards *wc, uint32_t basis)
988{
989 const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
990 const uint32_t *flow_u32 = (const uint32_t *) flow;
991 uint32_t hash;
992 size_t i;
993
994 hash = basis;
995 for (i = 0; i < FLOW_U32S; i++) {
996 hash = mhash_add(hash, flow_u32[i] & wc_u32[i]);
997 }
998 return mhash_finish(hash, 4 * FLOW_U32S);
999}
1000
1001/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
1002 * OpenFlow 1.0 "dl_vlan" value:
1003 *
1004 * - If it is in the range 0...4095, 'flow->vlan_tci' is set to match
1005 * that VLAN. Any existing PCP match is unchanged (it becomes 0 if
1006 * 'flow' previously matched packets without a VLAN header).
1007 *
1008 * - If it is OFP_VLAN_NONE, 'flow->vlan_tci' is set to match a packet
1009 * without a VLAN tag.
1010 *
1011 * - Other values of 'vid' should not be used. */
1012void
fb0451d9 1013flow_set_dl_vlan(struct flow *flow, ovs_be16 vid)
3719455c 1014{
0c436519 1015 if (vid == htons(OFP10_VLAN_NONE)) {
1016 flow->vlan_tci = htons(0);
1017 } else {
1018 vid &= htons(VLAN_VID_MASK);
1019 flow->vlan_tci &= ~htons(VLAN_VID_MASK);
1020 flow->vlan_tci |= htons(VLAN_CFI) | vid;
1021 }
1022}
1023
1024/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
1025 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
1026 * plus CFI). */
1027void
1028flow_set_vlan_vid(struct flow *flow, ovs_be16 vid)
1029{
1030 ovs_be16 mask = htons(VLAN_VID_MASK | VLAN_CFI);
1031 flow->vlan_tci &= ~mask;
1032 flow->vlan_tci |= vid & mask;
1033}
1034
1035/* Sets the VLAN PCP that 'flow' matches to 'pcp', which should be in the
1036 * range 0...7.
1037 *
1038 * This function has no effect on the VLAN ID that 'flow' matches.
1039 *
1040 * After calling this function, 'flow' will not match packets without a VLAN
1041 * header. */
1042void
1043flow_set_vlan_pcp(struct flow *flow, uint8_t pcp)
1044{
1045 pcp &= 0x07;
1046 flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
1047 flow->vlan_tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
1048}
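/* For reference, the TCI manipulated by the helpers above is laid out as
 * PCP (3 bits) | CFI (1 bit) | VID (12 bits). Starting from an all-zero
 * flow, flow_set_dl_vlan(flow, htons(10)) followed by
 * flow_set_vlan_pcp(flow, 3) leaves flow->vlan_tci equal to
 * htons((3 << VLAN_PCP_SHIFT) | VLAN_CFI | 10). */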
1049
1050/* Sets the MPLS Label that 'flow' matches to 'label', which is interpreted
1051 * as an OpenFlow 1.1 "mpls_label" value. */
1052void
1053flow_set_mpls_label(struct flow *flow, ovs_be32 label)
1054{
1055 set_mpls_lse_label(&flow->mpls_lse, label);
1056}
1057
1058/* Sets the MPLS TTL that 'flow' matches to 'ttl', which should be in the
1059 * range 0...255. */
1060void
1061flow_set_mpls_ttl(struct flow *flow, uint8_t ttl)
1062{
1063 set_mpls_lse_ttl(&flow->mpls_lse, ttl);
1064}
1065
1066/* Sets the MPLS TC that 'flow' matches to 'tc', which should be in the
1067 * range 0...7. */
1068void
1069flow_set_mpls_tc(struct flow *flow, uint8_t tc)
1070{
1071 set_mpls_lse_tc(&flow->mpls_lse, tc);
1072}
1073
1074/* Sets the MPLS BOS bit that 'flow' matches to which should be 0 or 1. */
1075void
1076flow_set_mpls_bos(struct flow *flow, uint8_t bos)
1077{
1078 set_mpls_lse_bos(&flow->mpls_lse, bos);
1079}
1080
1081
1082static void
1083flow_compose_l4(struct ofpbuf *b, const struct flow *flow)
1084{
1085 if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
1086 || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
1087 if (flow->nw_proto == IPPROTO_TCP) {
1088 struct tcp_header *tcp;
1089
1090 tcp = ofpbuf_put_zeros(b, sizeof *tcp);
1091 tcp->tcp_src = flow->tp_src;
1092 tcp->tcp_dst = flow->tp_dst;
1093 tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
1094 b->l7 = ofpbuf_tail(b);
1095 } else if (flow->nw_proto == IPPROTO_UDP) {
1096 struct udp_header *udp;
1097
1098 udp = ofpbuf_put_zeros(b, sizeof *udp);
1099 udp->udp_src = flow->tp_src;
1100 udp->udp_dst = flow->tp_dst;
1101 b->l7 = ofpbuf_tail(b);
1102 } else if (flow->nw_proto == IPPROTO_SCTP) {
1103 struct sctp_header *sctp;
1104
1105 sctp = ofpbuf_put_zeros(b, sizeof *sctp);
1106 sctp->sctp_src = flow->tp_src;
1107 sctp->sctp_dst = flow->tp_dst;
1108 b->l7 = ofpbuf_tail(b);
1109 } else if (flow->nw_proto == IPPROTO_ICMP) {
1110 struct icmp_header *icmp;
1111
1112 icmp = ofpbuf_put_zeros(b, sizeof *icmp);
1113 icmp->icmp_type = ntohs(flow->tp_src);
1114 icmp->icmp_code = ntohs(flow->tp_dst);
1115 icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
1116 b->l7 = ofpbuf_tail(b);
1117 } else if (flow->nw_proto == IPPROTO_ICMPV6) {
1118 struct icmp6_hdr *icmp;
1119
1120 icmp = ofpbuf_put_zeros(b, sizeof *icmp);
1121 icmp->icmp6_type = ntohs(flow->tp_src);
1122 icmp->icmp6_code = ntohs(flow->tp_dst);
1123
1124 if (icmp->icmp6_code == 0 &&
1125 (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
1126 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
1127 struct in6_addr *nd_target;
1128 struct nd_opt_hdr *nd_opt;
1129
1130 nd_target = ofpbuf_put_zeros(b, sizeof *nd_target);
1131 *nd_target = flow->nd_target;
1132
1133 if (!eth_addr_is_zero(flow->arp_sha)) {
1134 nd_opt = ofpbuf_put_zeros(b, 8);
1135 nd_opt->nd_opt_len = 1;
1136 nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
1137 memcpy(nd_opt + 1, flow->arp_sha, ETH_ADDR_LEN);
1138 }
1139 if (!eth_addr_is_zero(flow->arp_tha)) {
1140 nd_opt = ofpbuf_put_zeros(b, 8);
1141 nd_opt->nd_opt_len = 1;
1142 nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
1143 memcpy(nd_opt + 1, flow->arp_tha, ETH_ADDR_LEN);
1144 }
1145 }
1146 icmp->icmp6_cksum = (OVS_FORCE uint16_t)
1147 csum(icmp, (char *)ofpbuf_tail(b) - (char *)icmp);
1148 b->l7 = ofpbuf_tail(b);
1149 }
1150 }
1151}
1152
1153/* Puts into 'b' a packet that flow_extract() would parse as having the given
1154 * 'flow'.
1155 *
1156 * (This is useful only for testing, obviously, and the packet isn't really
dc5a7ce7 1157 * valid. It is missing some checksums, for one thing, and lots of fields
1158 * are just zeroed.) */
1159void
1160flow_compose(struct ofpbuf *b, const struct flow *flow)
1161{
52105b67 1162 /* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
1163 eth_compose(b, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
1164 if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
1165 struct eth_header *eth = b->l2;
1166 eth->eth_type = htons(b->size);
1167 return;
1168 }
1169
1170 if (flow->vlan_tci & htons(VLAN_CFI)) {
2f4ca41b 1171 eth_push_vlan(b, flow->vlan_tci);
1172 }
1173
cff78c88 1174 if (flow->dl_type == htons(ETH_TYPE_IP)) {
1175 struct ip_header *ip;
1176
52105b67 1177 ip = ofpbuf_put_zeros(b, sizeof *ip);
8b3b8dd1 1178 ip->ip_ihl_ver = IP_IHL_VER(5, 4);
eadef313 1179 ip->ip_tos = flow->nw_tos;
aabf5352 1180 ip->ip_ttl = flow->nw_ttl;
8b3b8dd1 1181 ip->ip_proto = flow->nw_proto;
1182 put_16aligned_be32(&ip->ip_src, flow->nw_src);
1183 put_16aligned_be32(&ip->ip_dst, flow->nw_dst);
8b3b8dd1 1184
eadef313 1185 if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
7257b535 1186 ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
eadef313 1187 if (flow->nw_frag & FLOW_NW_FRAG_LATER) {
1188 ip->ip_frag_off |= htons(100);
1189 }
1190 }
df9b6612 1191
1192 b->l4 = ofpbuf_tail(b);
1193
1194 flow_compose_l4(b, flow);
1195
1196 ip->ip_tot_len = htons((uint8_t *) b->data + b->size
1197 - (uint8_t *) b->l3);
dc5a7ce7 1198 ip->ip_csum = csum(ip, sizeof *ip);
cff78c88 1199 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
1200 struct ovs_16aligned_ip6_hdr *nh;
1201
1202 nh = ofpbuf_put_zeros(b, sizeof *nh);
1203 put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
1204 htonl(flow->nw_tos << 20) | flow->ipv6_label);
1205 nh->ip6_hlim = flow->nw_ttl;
1206 nh->ip6_nxt = flow->nw_proto;
1207
1208 memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
1209 memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));
1210
1211 b->l4 = ofpbuf_tail(b);
1212
1213 flow_compose_l4(b, flow);
1214
1215 nh->ip6_plen =
1216 b->l7 ? htons((uint8_t *) b->l7 - (uint8_t *) b->l4) : htons(0);
1217 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
1218 flow->dl_type == htons(ETH_TYPE_RARP)) {
1219 struct arp_eth_header *arp;
1220
1221 b->l3 = arp = ofpbuf_put_zeros(b, sizeof *arp);
1222 arp->ar_hrd = htons(1);
1223 arp->ar_pro = htons(ETH_TYPE_IP);
1224 arp->ar_hln = ETH_ADDR_LEN;
1225 arp->ar_pln = 4;
1226 arp->ar_op = htons(flow->nw_proto);
1227
1228 if (flow->nw_proto == ARP_OP_REQUEST ||
1229 flow->nw_proto == ARP_OP_REPLY) {
1230 put_16aligned_be32(&arp->ar_spa, flow->nw_src);
1231 put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
1232 memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN);
1233 memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN);
1234 }
1235 }
1236
1237 if (eth_type_mpls(flow->dl_type)) {
1238 b->l2_5 = b->l3;
1239 push_mpls(b, flow->dl_type, flow->mpls_lse);
1240 }
8b3b8dd1 1241}
1242\f
1243/* Compressed flow. */
1244
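/* Background: a struct miniflow stores only the "interesting" 32-bit words of
 * a struct flow. 'map' has one bit per uint32_t of struct flow (bit i set
 * means word i is present) and 'values' holds the present words in ascending
 * order of i. For instance, a flow whose only nonzero words are at u32
 * offsets 1 and 4 is represented as
 * map == (UINT64_C(1) << 1) | (UINT64_C(1) << 4) with values[] holding those
 * two words in that order. */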
1245static int
1246miniflow_n_values(const struct miniflow *flow)
1247{
fb9aefa3 1248 return count_1bits(flow->map);
1249}
1250
1251static uint32_t *
1252miniflow_alloc_values(struct miniflow *flow, int n)
1253{
1254 if (n <= MINI_N_INLINE) {
1255 return flow->inline_values;
1256 } else {
1257 COVERAGE_INC(miniflow_malloc);
1258 return xmalloc(n * sizeof *flow->values);
1259 }
1260}
1261
1262/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
1263 * the caller. The caller must have already initialized 'dst->map' properly
1264 * to indicate the significant uint32_t elements of 'src'. 'n' must be the
1265 * number of 1-bits in 'dst->map'.
1266 *
1267 * Normally the significant elements are the ones that are non-zero. However,
1268 * when a miniflow is initialized from a (mini)mask, the values can be zeroes,
1269 * so that the flow and mask always have the same maps.
1270 *
1271 * This function initializes 'dst->values' (either inline if possible or with
1272 * malloc() otherwise) and copies the uint32_t elements of 'src' indicated by
1273 * 'dst->map' into it. */
1274static void
1275miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
1276{
1277 const uint32_t *src_u32 = (const uint32_t *) src;
1278 unsigned int ofs;
080e28d0 1279 uint64_t map;
1280
1281 dst->values = miniflow_alloc_values(dst, n);
1282 ofs = 0;
080e28d0 1283 for (map = dst->map; map; map = zero_rightmost_1bit(map)) {
d43d314e 1284 dst->values[ofs++] = src_u32[raw_ctz(map)];
1285 }
1286}
1287
1288/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
1289 * with miniflow_destroy(). */
1290void
1291miniflow_init(struct miniflow *dst, const struct flow *src)
1292{
1293 const uint32_t *src_u32 = (const uint32_t *) src;
1294 unsigned int i;
1295 int n;
1296
1297 /* Initialize dst->map, counting the number of nonzero elements. */
1298 n = 0;
1299 dst->map = 0;
1300
1301 for (i = 0; i < FLOW_U32S; i++) {
1302 if (src_u32[i]) {
080e28d0 1303 dst->map |= UINT64_C(1) << i;
1304 n++;
1305 }
1306 }
1307
1308 miniflow_init__(dst, src, n);
1309}
5cb7a798 1310
1311/* Initializes 'dst' as a copy of 'src', using 'mask->map' as 'dst''s map. The
1312 * caller must eventually free 'dst' with miniflow_destroy(). */
1313void
1314miniflow_init_with_minimask(struct miniflow *dst, const struct flow *src,
1315 const struct minimask *mask)
1316{
080e28d0 1317 dst->map = mask->masks.map;
df40c152 1318 miniflow_init__(dst, src, miniflow_n_values(dst));
1319}
1320
1321/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
1322 * with miniflow_destroy(). */
1323void
1324miniflow_clone(struct miniflow *dst, const struct miniflow *src)
1325{
1326 int n = miniflow_n_values(src);
080e28d0 1327 dst->map = src->map;
1328 dst->values = miniflow_alloc_values(dst, n);
1329 memcpy(dst->values, src->values, n * sizeof *dst->values);
1330}
1331
1332/* Initializes 'dst' with the data in 'src', destroying 'src'.
1333 * The caller must eventually free 'dst' with miniflow_destroy(). */
1334void
1335miniflow_move(struct miniflow *dst, struct miniflow *src)
1336{
cc1a2dcb 1337 if (src->values == src->inline_values) {
b2c1f00b 1338 dst->values = dst->inline_values;
1339 memcpy(dst->values, src->values,
1340 miniflow_n_values(src) * sizeof *dst->values);
1341 } else {
1342 dst->values = src->values;
1343 }
080e28d0 1344 dst->map = src->map;
1345}
1346
1347/* Frees any memory owned by 'flow'. Does not free the storage in which 'flow'
1348 * itself resides; the caller is responsible for that. */
1349void
1350miniflow_destroy(struct miniflow *flow)
1351{
1352 if (flow->values != flow->inline_values) {
1353 free(flow->values);
1354 }
1355}
1356
1357/* Initializes 'dst' as a copy of 'src'. */
1358void
1359miniflow_expand(const struct miniflow *src, struct flow *dst)
1360{
1361 memset(dst, 0, sizeof *dst);
1362 flow_union_with_miniflow(dst, src);
1363}
1364
1365static const uint32_t *
1366miniflow_get__(const struct miniflow *flow, unsigned int u32_ofs)
1367{
080e28d0 1368 if (!(flow->map & (UINT64_C(1) << u32_ofs))) {
1369 static const uint32_t zero = 0;
1370 return &zero;
5cb7a798 1371 }
1372 return flow->values +
1373 count_1bits(flow->map & ((UINT64_C(1) << u32_ofs) - 1));
1374}
1375
1376/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'flow'
1377 * were expanded into a "struct flow". */
1378uint32_t
1379miniflow_get(const struct miniflow *flow, unsigned int u32_ofs)
1380{
1381 return *miniflow_get__(flow, u32_ofs);
1382}
1383
1384/* Returns the ovs_be16 that would be at byte offset 'u8_ofs' if 'flow' were
1385 * expanded into a "struct flow". */
1386static ovs_be16
1387miniflow_get_be16(const struct miniflow *flow, unsigned int u8_ofs)
1388{
1389 const uint32_t *u32p = miniflow_get__(flow, u8_ofs / 4);
1390 const ovs_be16 *be16p = (const ovs_be16 *) u32p;
1391 return be16p[u8_ofs % 4 != 0];
1392}
1393
1394/* Returns the VID within the vlan_tci member of the "struct flow" represented
1395 * by 'flow'. */
1396uint16_t
1397miniflow_get_vid(const struct miniflow *flow)
1398{
1399 ovs_be16 tci = miniflow_get_be16(flow, offsetof(struct flow, vlan_tci));
1400 return vlan_tci_to_vid(tci);
1401}
1402
1403/* Returns true if 'a' and 'b' are the same flow, false otherwise. */
1404bool
1405miniflow_equal(const struct miniflow *a, const struct miniflow *b)
1406{
1407 const uint32_t *ap = a->values;
1408 const uint32_t *bp = b->values;
1409 const uint64_t a_map = a->map;
1410 const uint64_t b_map = b->map;
1411 uint64_t map;
5cb7a798 1412
1413 if (a_map == b_map) {
1414 for (map = a_map; map; map = zero_rightmost_1bit(map)) {
1415 if (*ap++ != *bp++) {
1416 return false;
df40c152 1417 }
1418 }
1419 } else {
1420 for (map = a_map | b_map; map; map = zero_rightmost_1bit(map)) {
1421 uint64_t bit = rightmost_1bit(map);
1422 uint64_t a_value = a_map & bit ? *ap++ : 0;
1423 uint64_t b_value = b_map & bit ? *bp++ : 0;
df40c152 1424
1425 if (a_value != b_value) {
1426 return false;
df40c152 1427 }
1428 }
1429 }
1430
df40c152 1431 return true;
1432}
1433
1434/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
1435 * in 'mask', false if they differ. */
1436bool
1437miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
1438 const struct minimask *mask)
1439{
1440 const uint32_t *p;
080e28d0 1441 uint64_t map;
1442
1443 p = mask->masks.values;
5cb7a798 1444
080e28d0 1445 for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
d43d314e 1446 int ofs = raw_ctz(map);
5cb7a798 1447
1448 if ((miniflow_get(a, ofs) ^ miniflow_get(b, ofs)) & *p) {
1449 return false;
5cb7a798 1450 }
080e28d0 1451 p++;
1452 }
1453
1454 return true;
1455}
1456
1457/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
1458 * in 'mask', false if they differ. */
1459bool
1460miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
1461 const struct minimask *mask)
1462{
1463 const uint32_t *b_u32 = (const uint32_t *) b;
1464 const uint32_t *p;
080e28d0 1465 uint64_t map;
1466
1467 p = mask->masks.values;
5cb7a798 1468
080e28d0 1469 for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
d43d314e 1470 int ofs = raw_ctz(map);
5cb7a798 1471
1472 if ((miniflow_get(a, ofs) ^ b_u32[ofs]) & *p) {
1473 return false;
5cb7a798 1474 }
080e28d0 1475 p++;
1476 }
1477
1478 return true;
1479}
1480
1481/* Returns a hash value for 'flow', given 'basis'. */
1482uint32_t
1483miniflow_hash(const struct miniflow *flow, uint32_t basis)
1484{
1485 const uint32_t *p = flow->values;
1486 uint32_t hash = basis;
1487 uint64_t hash_map = 0;
1488 uint64_t map;
df40c152 1489
1490 for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
1491 if (*p) {
1492 hash = mhash_add(hash, *p);
1493 hash_map |= rightmost_1bit(map);
df40c152 1494 }
080e28d0 1495 p++;
df40c152 1496 }
1497 hash = mhash_add(hash, hash_map);
1498 hash = mhash_add(hash, hash_map >> 32);
1499
df40c152 1500 return mhash_finish(hash, p - flow->values);
1501}
1502
1503/* Returns a hash value for the bits of 'flow' where there are 1-bits in
1504 * 'mask', given 'basis'.
1505 *
1506 * The hash values returned by this function are the same as those returned by
1507 * flow_hash_in_minimask(), only the form of the arguments differ. */
1508uint32_t
1509miniflow_hash_in_minimask(const struct miniflow *flow,
1510 const struct minimask *mask, uint32_t basis)
1511{
1512 const uint32_t *p = mask->masks.values;
1513 uint32_t hash;
080e28d0 1514 uint64_t map;
1515
1516 hash = basis;
5cb7a798 1517
080e28d0 1518 for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
83916319 1519 hash = mhash_add(hash, miniflow_get(flow, raw_ctz(map)) & *p++);
1520 }
1521
cb8ca815 1522 return mhash_finish(hash, (p - mask->masks.values) * 4);
1523}
1524
1525/* Returns a hash value for the bits of 'flow' where there are 1-bits in
1526 * 'mask', given 'basis'.
1527 *
1528 * The hash values returned by this function are the same as those returned by
1529 * miniflow_hash_in_minimask(), only the form of the arguments differ. */
1530uint32_t
1531flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
1532 uint32_t basis)
1533{
080e28d0 1534 const uint32_t *flow_u32 = (const uint32_t *)flow;
1535 const uint32_t *p = mask->masks.values;
1536 uint32_t hash;
080e28d0 1537 uint64_t map;
1538
1539 hash = basis;
080e28d0 1540 for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
83916319 1541 hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
1542 }
1543
cb8ca815 1544 return mhash_finish(hash, (p - mask->masks.values) * 4);
5cb7a798 1545}
1546
1547/* Returns a hash value for the bits of range [start, end) in 'flow',
 1548 * where there are 1-bits in 'mask', given '*basis'.
1549 *
1550 * The hash values returned by this function are the same as those returned by
1551 * minimatch_hash_range(), only the form of the arguments differ. */
1552uint32_t
1553flow_hash_in_minimask_range(const struct flow *flow,
1554 const struct minimask *mask,
1555 uint8_t start, uint8_t end, uint32_t *basis)
1556{
1557 const uint32_t *flow_u32 = (const uint32_t *)flow;
1558 unsigned int offset;
1559 uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
1560 &offset);
1561 const uint32_t *p = mask->masks.values + offset;
1562 uint32_t hash = *basis;
1563
1564 for (; map; map = zero_rightmost_1bit(map)) {
83916319 1565 hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
1566 }
1567
1568 *basis = hash; /* Allow continuation from the unfinished value. */
1569 return mhash_finish(hash, (p - mask->masks.values) * 4);
1570}
1571
1572\f
 1573/* Initializes 'mask' as a copy of 'wc'. The caller must eventually free 'mask'
 1574 * with minimask_destroy(). */
1575void
1576minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
1577{
1578 miniflow_init(&mask->masks, &wc->masks);
1579}
1580
1581/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
1582 * with minimask_destroy(). */
1583void
1584minimask_clone(struct minimask *dst, const struct minimask *src)
1585{
1586 miniflow_clone(&dst->masks, &src->masks);
1587}
1588
1589/* Initializes 'dst' with the data in 'src', destroying 'src'.
1590 * The caller must eventually free 'dst' with minimask_destroy(). */
1591void
1592minimask_move(struct minimask *dst, struct minimask *src)
1593{
a24de7ee 1594 miniflow_move(&dst->masks, &src->masks);
1595}
1596
1597/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
1598 *
1599 * The caller must provide room for FLOW_U32S "uint32_t"s in 'storage', for use
1600 * by 'dst_'. The caller must *not* free 'dst_' with minimask_destroy(). */
1601void
1602minimask_combine(struct minimask *dst_,
1603 const struct minimask *a_, const struct minimask *b_,
1604 uint32_t storage[FLOW_U32S])
1605{
1606 struct miniflow *dst = &dst_->masks;
1607 const struct miniflow *a = &a_->masks;
1608 const struct miniflow *b = &b_->masks;
1609 uint64_t map;
1610 int n = 0;
5cb7a798 1611
5cb7a798 1612 dst->values = storage;
1613
1614 dst->map = 0;
1615 for (map = a->map & b->map; map; map = zero_rightmost_1bit(map)) {
d43d314e 1616 int ofs = raw_ctz(map);
1617 uint32_t mask = miniflow_get(a, ofs) & miniflow_get(b, ofs);
1618
1619 if (mask) {
1620 dst->map |= rightmost_1bit(map);
1621 dst->values[n++] = mask;
1622 }
1623 }
1624}
1625
1626/* Frees any memory owned by 'mask'. Does not free the storage in which 'mask'
1627 * itself resides; the caller is responsible for that. */
1628void
1629minimask_destroy(struct minimask *mask)
1630{
1631 miniflow_destroy(&mask->masks);
1632}
1633
 1634/* Initializes 'wc' as a copy of 'mask'. */
1635void
1636minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
1637{
1638 miniflow_expand(&mask->masks, &wc->masks);
1639}
1640
1641/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'mask'
1642 * were expanded into a "struct flow_wildcards". */
1643uint32_t
1644minimask_get(const struct minimask *mask, unsigned int u32_ofs)
1645{
1646 return miniflow_get(&mask->masks, u32_ofs);
1647}
1648
1649/* Returns the VID mask within the vlan_tci member of the "struct
1650 * flow_wildcards" represented by 'mask'. */
1651uint16_t
1652minimask_get_vid_mask(const struct minimask *mask)
1653{
1654 return miniflow_get_vid(&mask->masks);
1655}
1656
1657/* Returns true if 'a' and 'b' are the same flow mask, false otherwise. */
1658bool
1659minimask_equal(const struct minimask *a, const struct minimask *b)
1660{
1661 return miniflow_equal(&a->masks, &b->masks);
1662}
1663
1664/* Returns a hash value for 'mask', given 'basis'. */
1665uint32_t
1666minimask_hash(const struct minimask *mask, uint32_t basis)
1667{
1668 return miniflow_hash(&mask->masks, basis);
1669}
1670
1671/* Returns true if at least one bit is wildcarded in 'a_' but not in 'b_',
1672 * false otherwise. */
1673bool
1674minimask_has_extra(const struct minimask *a_, const struct minimask *b_)
1675{
1676 const struct miniflow *a = &a_->masks;
1677 const struct miniflow *b = &b_->masks;
080e28d0 1678 uint64_t map;
5cb7a798 1679
080e28d0 1680 for (map = a->map | b->map; map; map = zero_rightmost_1bit(map)) {
d43d314e 1681 int ofs = raw_ctz(map);
1682 uint32_t a_u32 = miniflow_get(a, ofs);
1683 uint32_t b_u32 = miniflow_get(b, ofs);
5cb7a798 1684
1685 if ((a_u32 & b_u32) != b_u32) {
1686 return true;
1687 }
1688 }
1689
1690 return false;
1691}
1692
1693/* Returns true if 'mask' matches every packet, false if 'mask' fixes any bits
1694 * or fields. */
1695bool
1696minimask_is_catchall(const struct minimask *mask_)
1697{
1698 const struct miniflow *mask = &mask_->masks;
df40c152 1699 const uint32_t *p = mask->values;
080e28d0 1700 uint64_t map;
df40c152 1701
1702 for (map = mask->map; map; map = zero_rightmost_1bit(map)) {
1703 if (*p++) {
1704 return false;
1705 }
1706 }
1707 return true;
5cb7a798 1708}