2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include <sys/types.h>
22 #include <netinet/in.h>
23 #include <netinet/icmp6.h>
24 #include <netinet/ip6.h>
28 #include "byte-order.h"
32 #include "openvswitch/dynamic-string.h"
35 #include "openvswitch/match.h"
36 #include "dp-packet.h"
37 #include "openflow/openflow.h"
41 #include "unaligned.h"
43 COVERAGE_DEFINE(flow_extract
);
44 COVERAGE_DEFINE(miniflow_malloc
);
46 /* U64 indices for segmented flow classification. */
47 const uint8_t flow_segment_u64s
[4] = {
48 FLOW_SEGMENT_1_ENDS_AT
/ sizeof(uint64_t),
49 FLOW_SEGMENT_2_ENDS_AT
/ sizeof(uint64_t),
50 FLOW_SEGMENT_3_ENDS_AT
/ sizeof(uint64_t),
54 /* Asserts that field 'f1' follows immediately after 'f0' in struct flow,
55 * without any intervening padding. */
56 #define ASSERT_SEQUENTIAL(f0, f1) \
57 BUILD_ASSERT_DECL(offsetof(struct flow, f0) \
58 + MEMBER_SIZEOF(struct flow, f0) \
59 == offsetof(struct flow, f1))
61 /* Asserts that fields 'f0' and 'f1' are in the same 32-bit aligned word within
63 #define ASSERT_SAME_WORD(f0, f1) \
64 BUILD_ASSERT_DECL(offsetof(struct flow, f0) / 4 \
65 == offsetof(struct flow, f1) / 4)
67 /* Asserts that 'f0' and 'f1' are both sequential and within the same 32-bit
68 * aligned word in struct flow. */
69 #define ASSERT_SEQUENTIAL_SAME_WORD(f0, f1) \
70 ASSERT_SEQUENTIAL(f0, f1); \
71 ASSERT_SAME_WORD(f0, f1)
73 /* miniflow_extract() assumes the following to be true to optimize the
74 * extraction process. */
75 ASSERT_SEQUENTIAL_SAME_WORD(dl_type
, vlan_tci
);
77 ASSERT_SEQUENTIAL_SAME_WORD(nw_frag
, nw_tos
);
78 ASSERT_SEQUENTIAL_SAME_WORD(nw_tos
, nw_ttl
);
79 ASSERT_SEQUENTIAL_SAME_WORD(nw_ttl
, nw_proto
);
81 /* TCP flags in the middle of a BE64, zeroes in the other half. */
82 BUILD_ASSERT_DECL(offsetof(struct flow
, tcp_flags
) % 8 == 4);
85 #define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl) \
88 #define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl))
91 ASSERT_SEQUENTIAL_SAME_WORD(tp_src
, tp_dst
);
93 /* Removes 'size' bytes from the head end of '*datap', of size '*sizep', which
94 * must contain at least 'size' bytes of data. Returns the first byte of data
96 static inline const void *
97 data_pull(const void **datap
, size_t *sizep
, size_t size
)
99 const char *data
= *datap
;
100 *datap
= data
+ size
;
105 /* If '*datap' has at least 'size' bytes of data, removes that many bytes from
106 * the head end of '*datap' and returns the first byte removed. Otherwise,
107 * returns a null pointer without modifying '*datap'. */
108 static inline const void *
109 data_try_pull(const void **datap
, size_t *sizep
, size_t size
)
111 return OVS_LIKELY(*sizep
>= size
) ? data_pull(datap
, sizep
, size
) : NULL
;
114 /* Context for pushing data to a miniflow. */
118 uint64_t * const end
;
121 /* miniflow_push_* macros allow filling in a miniflow data values in order.
122 * Assertions are needed only when the layout of the struct flow is modified.
123 * 'ofs' is a compile-time constant, which allows most of the code be optimized
124 * away. Some GCC versions gave warnings on ALWAYS_INLINE, so these are
125 * defined as macros. */
127 #if (FLOW_WC_SEQ != 35)
128 #define MINIFLOW_ASSERT(X) ovs_assert(X)
129 BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
130 "assertions enabled. Consider updating FLOW_WC_SEQ after "
133 #define MINIFLOW_ASSERT(X)
136 /* True if 'IDX' and higher bits are not set. */
137 #define ASSERT_FLOWMAP_NOT_SET(FM, IDX) \
139 MINIFLOW_ASSERT(!((FM)->bits[(IDX) / MAP_T_BITS] & \
140 (MAP_MAX << ((IDX) % MAP_T_BITS)))); \
141 for (size_t i = (IDX) / MAP_T_BITS + 1; i < FLOWMAP_UNITS; i++) { \
142 MINIFLOW_ASSERT(!(FM)->bits[i]); \
146 #define miniflow_set_map(MF, OFS) \
148 ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS)); \
149 flowmap_set(&MF.map, (OFS), 1); \
152 #define miniflow_assert_in_map(MF, OFS) \
153 MINIFLOW_ASSERT(flowmap_is_set(&MF.map, (OFS))); \
154 ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS) + 1)
156 #define miniflow_push_uint64_(MF, OFS, VALUE) \
158 MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 8 == 0); \
159 *MF.data++ = VALUE; \
160 miniflow_set_map(MF, OFS / 8); \
163 #define miniflow_push_be64_(MF, OFS, VALUE) \
164 miniflow_push_uint64_(MF, OFS, (OVS_FORCE uint64_t)(VALUE))
166 #define miniflow_push_uint32_(MF, OFS, VALUE) \
168 MINIFLOW_ASSERT(MF.data < MF.end); \
170 if ((OFS) % 8 == 0) { \
171 miniflow_set_map(MF, OFS / 8); \
172 *(uint32_t *)MF.data = VALUE; \
173 } else if ((OFS) % 8 == 4) { \
174 miniflow_assert_in_map(MF, OFS / 8); \
175 *((uint32_t *)MF.data + 1) = VALUE; \
180 #define miniflow_push_be32_(MF, OFS, VALUE) \
181 miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))
183 #define miniflow_push_uint16_(MF, OFS, VALUE) \
185 MINIFLOW_ASSERT(MF.data < MF.end); \
187 if ((OFS) % 8 == 0) { \
188 miniflow_set_map(MF, OFS / 8); \
189 *(uint16_t *)MF.data = VALUE; \
190 } else if ((OFS) % 8 == 2) { \
191 miniflow_assert_in_map(MF, OFS / 8); \
192 *((uint16_t *)MF.data + 1) = VALUE; \
193 } else if ((OFS) % 8 == 4) { \
194 miniflow_assert_in_map(MF, OFS / 8); \
195 *((uint16_t *)MF.data + 2) = VALUE; \
196 } else if ((OFS) % 8 == 6) { \
197 miniflow_assert_in_map(MF, OFS / 8); \
198 *((uint16_t *)MF.data + 3) = VALUE; \
203 #define miniflow_push_uint8_(MF, OFS, VALUE) \
205 MINIFLOW_ASSERT(MF.data < MF.end); \
207 if ((OFS) % 8 == 0) { \
208 miniflow_set_map(MF, OFS / 8); \
209 *(uint8_t *)MF.data = VALUE; \
210 } else if ((OFS) % 8 == 7) { \
211 miniflow_assert_in_map(MF, OFS / 8); \
212 *((uint8_t *)MF.data + 7) = VALUE; \
215 miniflow_assert_in_map(MF, OFS / 8); \
216 *((uint8_t *)MF.data + ((OFS) % 8)) = VALUE; \
220 #define miniflow_pad_to_64_(MF, OFS) \
222 MINIFLOW_ASSERT((OFS) % 8 != 0); \
223 miniflow_assert_in_map(MF, OFS / 8); \
225 memset((uint8_t *)MF.data + (OFS) % 8, 0, 8 - (OFS) % 8); \
229 #define miniflow_pad_from_64_(MF, OFS) \
231 MINIFLOW_ASSERT(MF.data < MF.end); \
233 MINIFLOW_ASSERT((OFS) % 8 != 0); \
234 miniflow_set_map(MF, OFS / 8); \
236 memset((uint8_t *)MF.data, 0, (OFS) % 8); \
239 #define miniflow_push_be16_(MF, OFS, VALUE) \
240 miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
242 #define miniflow_push_be8_(MF, OFS, VALUE) \
243 miniflow_push_uint8_(MF, OFS, (OVS_FORCE uint8_t)VALUE);
245 #define miniflow_set_maps(MF, OFS, N_WORDS) \
247 size_t ofs = (OFS); \
248 size_t n_words = (N_WORDS); \
250 MINIFLOW_ASSERT(n_words && MF.data + n_words <= MF.end); \
251 ASSERT_FLOWMAP_NOT_SET(&MF.map, ofs); \
252 flowmap_set(&MF.map, ofs, n_words); \
255 /* Data at 'valuep' may be unaligned. */
256 #define miniflow_push_words_(MF, OFS, VALUEP, N_WORDS) \
258 MINIFLOW_ASSERT((OFS) % 8 == 0); \
259 miniflow_set_maps(MF, (OFS) / 8, (N_WORDS)); \
260 memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof *MF.data); \
261 MF.data += (N_WORDS); \
264 /* Push 32-bit words padded to 64-bits. */
265 #define miniflow_push_words_32_(MF, OFS, VALUEP, N_WORDS) \
267 miniflow_set_maps(MF, (OFS) / 8, DIV_ROUND_UP(N_WORDS, 2)); \
268 memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof(uint32_t)); \
269 MF.data += DIV_ROUND_UP(N_WORDS, 2); \
270 if ((N_WORDS) & 1) { \
271 *((uint32_t *)MF.data - 1) = 0; \
275 /* Data at 'valuep' may be unaligned. */
276 /* MACs start 64-aligned, and must be followed by other data or padding. */
277 #define miniflow_push_macs_(MF, OFS, VALUEP) \
279 miniflow_set_maps(MF, (OFS) / 8, 2); \
280 memcpy(MF.data, (VALUEP), 2 * ETH_ADDR_LEN); \
281 MF.data += 1; /* First word only. */ \
284 #define miniflow_push_uint32(MF, FIELD, VALUE) \
285 miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE)
287 #define miniflow_push_be32(MF, FIELD, VALUE) \
288 miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE)
290 #define miniflow_push_uint16(MF, FIELD, VALUE) \
291 miniflow_push_uint16_(MF, offsetof(struct flow, FIELD), VALUE)
293 #define miniflow_push_be16(MF, FIELD, VALUE) \
294 miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)
296 #define miniflow_push_uint8(MF, FIELD, VALUE) \
297 miniflow_push_uint8_(MF, offsetof(struct flow, FIELD), VALUE)
299 #define miniflow_pad_to_64(MF, FIELD) \
300 miniflow_pad_to_64_(MF, OFFSETOFEND(struct flow, FIELD))
302 #define miniflow_pad_from_64(MF, FIELD) \
303 miniflow_pad_from_64_(MF, offsetof(struct flow, FIELD))
305 #define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS) \
306 miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
308 #define miniflow_push_words_32(MF, FIELD, VALUEP, N_WORDS) \
309 miniflow_push_words_32_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
311 #define miniflow_push_macs(MF, FIELD, VALUEP) \
312 miniflow_push_macs_(MF, offsetof(struct flow, FIELD), VALUEP)
314 /* Pulls the MPLS headers at '*datap' and returns the count of them. */
316 parse_mpls(const void **datap
, size_t *sizep
)
318 const struct mpls_hdr
*mh
;
321 while ((mh
= data_try_pull(datap
, sizep
, sizeof *mh
))) {
323 if (mh
->mpls_lse
.lo
& htons(1 << MPLS_BOS_SHIFT
)) {
327 return MIN(count
, FLOW_MAX_MPLS_LABELS
);
330 static inline ovs_be16
331 parse_vlan(const void **datap
, size_t *sizep
)
333 const struct eth_header
*eth
= *datap
;
336 ovs_be16 eth_type
; /* ETH_TYPE_VLAN */
340 data_pull(datap
, sizep
, ETH_ADDR_LEN
* 2);
342 if (eth
->eth_type
== htons(ETH_TYPE_VLAN
)) {
343 if (OVS_LIKELY(*sizep
344 >= sizeof(struct qtag_prefix
) + sizeof(ovs_be16
))) {
345 const struct qtag_prefix
*qp
= data_pull(datap
, sizep
, sizeof *qp
);
346 return qp
->tci
| htons(VLAN_CFI
);
352 static inline ovs_be16
353 parse_ethertype(const void **datap
, size_t *sizep
)
355 const struct llc_snap_header
*llc
;
358 proto
= *(ovs_be16
*) data_pull(datap
, sizep
, sizeof proto
);
359 if (OVS_LIKELY(ntohs(proto
) >= ETH_TYPE_MIN
)) {
363 if (OVS_UNLIKELY(*sizep
< sizeof *llc
)) {
364 return htons(FLOW_DL_TYPE_NONE
);
368 if (OVS_UNLIKELY(llc
->llc
.llc_dsap
!= LLC_DSAP_SNAP
369 || llc
->llc
.llc_ssap
!= LLC_SSAP_SNAP
370 || llc
->llc
.llc_cntl
!= LLC_CNTL_SNAP
371 || memcmp(llc
->snap
.snap_org
, SNAP_ORG_ETHERNET
,
372 sizeof llc
->snap
.snap_org
))) {
373 return htons(FLOW_DL_TYPE_NONE
);
376 data_pull(datap
, sizep
, sizeof *llc
);
378 if (OVS_LIKELY(ntohs(llc
->snap
.snap_type
) >= ETH_TYPE_MIN
)) {
379 return llc
->snap
.snap_type
;
382 return htons(FLOW_DL_TYPE_NONE
);
386 parse_icmpv6(const void **datap
, size_t *sizep
, const struct icmp6_hdr
*icmp
,
387 const struct in6_addr
**nd_target
,
388 struct eth_addr arp_buf
[2])
390 if (icmp
->icmp6_code
== 0 &&
391 (icmp
->icmp6_type
== ND_NEIGHBOR_SOLICIT
||
392 icmp
->icmp6_type
== ND_NEIGHBOR_ADVERT
)) {
394 *nd_target
= data_try_pull(datap
, sizep
, sizeof **nd_target
);
395 if (OVS_UNLIKELY(!*nd_target
)) {
399 while (*sizep
>= 8) {
400 /* The minimum size of an option is 8 bytes, which also is
401 * the size of Ethernet link-layer options. */
402 const struct ovs_nd_opt
*nd_opt
= *datap
;
403 int opt_len
= nd_opt
->nd_opt_len
* ND_OPT_LEN
;
405 if (!opt_len
|| opt_len
> *sizep
) {
409 /* Store the link layer address if the appropriate option is
410 * provided. It is considered an error if the same link
411 * layer option is specified twice. */
412 if (nd_opt
->nd_opt_type
== ND_OPT_SOURCE_LINKADDR
414 if (OVS_LIKELY(eth_addr_is_zero(arp_buf
[0]))) {
415 arp_buf
[0] = nd_opt
->nd_opt_mac
;
419 } else if (nd_opt
->nd_opt_type
== ND_OPT_TARGET_LINKADDR
421 if (OVS_LIKELY(eth_addr_is_zero(arp_buf
[1]))) {
422 arp_buf
[1] = nd_opt
->nd_opt_mac
;
428 if (OVS_UNLIKELY(!data_try_pull(datap
, sizep
, opt_len
))) {
438 arp_buf
[0] = eth_addr_zero
;
439 arp_buf
[1] = eth_addr_zero
;
442 /* Initializes 'flow' members from 'packet' and 'md'
444 * Initializes 'packet' header l2 pointer to the start of the Ethernet
445 * header, and the layer offsets as follows:
447 * - packet->l2_5_ofs to the start of the MPLS shim header, or UINT16_MAX
448 * when there is no MPLS shim header.
450 * - packet->l3_ofs to just past the Ethernet header, or just past the
451 * vlan_header if one is present, to the first byte of the payload of the
452 * Ethernet frame. UINT16_MAX if the frame is too short to contain an
455 * - packet->l4_ofs to just past the IPv4 header, if one is present and
456 * has at least the content used for the fields of interest for the flow,
457 * otherwise UINT16_MAX.
460 flow_extract(struct dp_packet
*packet
, struct flow
*flow
)
464 uint64_t buf
[FLOW_U64S
];
467 COVERAGE_INC(flow_extract
);
469 miniflow_extract(packet
, &m
.mf
);
470 miniflow_expand(&m
.mf
, flow
);
473 /* Caller is responsible for initializing 'dst' with enough storage for
474 * FLOW_U64S * 8 bytes. */
476 miniflow_extract(struct dp_packet
*packet
, struct miniflow
*dst
)
478 const struct pkt_metadata
*md
= &packet
->md
;
479 const void *data
= dp_packet_data(packet
);
480 size_t size
= dp_packet_size(packet
);
481 uint64_t *values
= miniflow_values(dst
);
482 struct mf_ctx mf
= { FLOWMAP_EMPTY_INITIALIZER
, values
,
483 values
+ FLOW_U64S
};
486 uint8_t nw_frag
, nw_tos
, nw_ttl
, nw_proto
;
489 if (flow_tnl_dst_is_set(&md
->tunnel
)) {
490 miniflow_push_words(mf
, tunnel
, &md
->tunnel
,
491 offsetof(struct flow_tnl
, metadata
) /
494 if (!(md
->tunnel
.flags
& FLOW_TNL_F_UDPIF
)) {
495 if (md
->tunnel
.metadata
.present
.map
) {
496 miniflow_push_words(mf
, tunnel
.metadata
, &md
->tunnel
.metadata
,
497 sizeof md
->tunnel
.metadata
/
501 if (md
->tunnel
.metadata
.present
.len
) {
502 miniflow_push_words(mf
, tunnel
.metadata
.present
,
503 &md
->tunnel
.metadata
.present
, 1);
504 miniflow_push_words(mf
, tunnel
.metadata
.opts
.gnv
,
505 md
->tunnel
.metadata
.opts
.gnv
,
506 DIV_ROUND_UP(md
->tunnel
.metadata
.present
.len
,
511 if (md
->skb_priority
|| md
->pkt_mark
) {
512 miniflow_push_uint32(mf
, skb_priority
, md
->skb_priority
);
513 miniflow_push_uint32(mf
, pkt_mark
, md
->pkt_mark
);
515 miniflow_push_uint32(mf
, dp_hash
, md
->dp_hash
);
516 miniflow_push_uint32(mf
, in_port
, odp_to_u32(md
->in_port
.odp_port
));
517 if (md
->recirc_id
|| md
->ct_state
) {
518 miniflow_push_uint32(mf
, recirc_id
, md
->recirc_id
);
519 miniflow_push_uint16(mf
, ct_state
, md
->ct_state
);
520 miniflow_push_uint16(mf
, ct_zone
, md
->ct_zone
);
524 miniflow_push_uint32(mf
, ct_mark
, md
->ct_mark
);
525 miniflow_pad_to_64(mf
, ct_mark
);
527 if (!ovs_u128_is_zero(md
->ct_label
)) {
528 miniflow_push_words(mf
, ct_label
, &md
->ct_label
,
529 sizeof md
->ct_label
/ sizeof(uint64_t));
533 /* Initialize packet's layer pointer and offsets. */
535 dp_packet_reset_offsets(packet
);
537 /* Must have full Ethernet header to proceed. */
538 if (OVS_UNLIKELY(size
< sizeof(struct eth_header
))) {
544 ASSERT_SEQUENTIAL(dl_dst
, dl_src
);
545 miniflow_push_macs(mf
, dl_dst
, data
);
546 /* dl_type, vlan_tci. */
547 vlan_tci
= parse_vlan(&data
, &size
);
548 dl_type
= parse_ethertype(&data
, &size
);
549 miniflow_push_be16(mf
, dl_type
, dl_type
);
550 miniflow_push_be16(mf
, vlan_tci
, vlan_tci
);
554 if (OVS_UNLIKELY(eth_type_mpls(dl_type
))) {
556 const void *mpls
= data
;
558 packet
->l2_5_ofs
= (char *)data
- l2
;
559 count
= parse_mpls(&data
, &size
);
560 miniflow_push_words_32(mf
, mpls_lse
, mpls
, count
);
564 packet
->l3_ofs
= (char *)data
- l2
;
567 if (OVS_LIKELY(dl_type
== htons(ETH_TYPE_IP
))) {
568 const struct ip_header
*nh
= data
;
572 if (OVS_UNLIKELY(size
< IP_HEADER_LEN
)) {
575 ip_len
= IP_IHL(nh
->ip_ihl_ver
) * 4;
577 if (OVS_UNLIKELY(ip_len
< IP_HEADER_LEN
)) {
580 if (OVS_UNLIKELY(size
< ip_len
)) {
583 tot_len
= ntohs(nh
->ip_tot_len
);
584 if (OVS_UNLIKELY(tot_len
> size
)) {
587 if (OVS_UNLIKELY(size
- tot_len
> UINT8_MAX
)) {
590 dp_packet_set_l2_pad_size(packet
, size
- tot_len
);
591 size
= tot_len
; /* Never pull padding. */
593 /* Push both source and destination address at once. */
594 miniflow_push_words(mf
, nw_src
, &nh
->ip_src
, 1);
596 miniflow_push_be32(mf
, ipv6_label
, 0); /* Padding for IPv4. */
600 nw_proto
= nh
->ip_proto
;
601 if (OVS_UNLIKELY(IP_IS_FRAGMENT(nh
->ip_frag_off
))) {
602 nw_frag
= FLOW_NW_FRAG_ANY
;
603 if (nh
->ip_frag_off
& htons(IP_FRAG_OFF_MASK
)) {
604 nw_frag
|= FLOW_NW_FRAG_LATER
;
607 data_pull(&data
, &size
, ip_len
);
608 } else if (dl_type
== htons(ETH_TYPE_IPV6
)) {
609 const struct ovs_16aligned_ip6_hdr
*nh
;
613 if (OVS_UNLIKELY(size
< sizeof *nh
)) {
616 nh
= data_pull(&data
, &size
, sizeof *nh
);
618 plen
= ntohs(nh
->ip6_plen
);
619 if (OVS_UNLIKELY(plen
> size
)) {
622 /* Jumbo Payload option not supported yet. */
623 if (OVS_UNLIKELY(size
- plen
> UINT8_MAX
)) {
626 dp_packet_set_l2_pad_size(packet
, size
- plen
);
627 size
= plen
; /* Never pull padding. */
629 miniflow_push_words(mf
, ipv6_src
, &nh
->ip6_src
,
630 sizeof nh
->ip6_src
/ 8);
631 miniflow_push_words(mf
, ipv6_dst
, &nh
->ip6_dst
,
632 sizeof nh
->ip6_dst
/ 8);
634 tc_flow
= get_16aligned_be32(&nh
->ip6_flow
);
636 ovs_be32 label
= tc_flow
& htonl(IPV6_LABEL_MASK
);
637 miniflow_push_be32(mf
, ipv6_label
, label
);
640 nw_tos
= ntohl(tc_flow
) >> 20;
641 nw_ttl
= nh
->ip6_hlim
;
642 nw_proto
= nh
->ip6_nxt
;
645 if (OVS_LIKELY((nw_proto
!= IPPROTO_HOPOPTS
)
646 && (nw_proto
!= IPPROTO_ROUTING
)
647 && (nw_proto
!= IPPROTO_DSTOPTS
)
648 && (nw_proto
!= IPPROTO_AH
)
649 && (nw_proto
!= IPPROTO_FRAGMENT
))) {
650 /* It's either a terminal header (e.g., TCP, UDP) or one we
651 * don't understand. In either case, we're done with the
652 * packet, so use it to fill in 'nw_proto'. */
656 /* We only verify that at least 8 bytes of the next header are
657 * available, but many of these headers are longer. Ensure that
658 * accesses within the extension header are within those first 8
659 * bytes. All extension headers are required to be at least 8
661 if (OVS_UNLIKELY(size
< 8)) {
665 if ((nw_proto
== IPPROTO_HOPOPTS
)
666 || (nw_proto
== IPPROTO_ROUTING
)
667 || (nw_proto
== IPPROTO_DSTOPTS
)) {
668 /* These headers, while different, have the fields we care
669 * about in the same location and with the same
671 const struct ip6_ext
*ext_hdr
= data
;
672 nw_proto
= ext_hdr
->ip6e_nxt
;
673 if (OVS_UNLIKELY(!data_try_pull(&data
, &size
,
674 (ext_hdr
->ip6e_len
+ 1) * 8))) {
677 } else if (nw_proto
== IPPROTO_AH
) {
678 /* A standard AH definition isn't available, but the fields
679 * we care about are in the same location as the generic
680 * option header--only the header length is calculated
682 const struct ip6_ext
*ext_hdr
= data
;
683 nw_proto
= ext_hdr
->ip6e_nxt
;
684 if (OVS_UNLIKELY(!data_try_pull(&data
, &size
,
685 (ext_hdr
->ip6e_len
+ 2) * 4))) {
688 } else if (nw_proto
== IPPROTO_FRAGMENT
) {
689 const struct ovs_16aligned_ip6_frag
*frag_hdr
= data
;
691 nw_proto
= frag_hdr
->ip6f_nxt
;
692 if (!data_try_pull(&data
, &size
, sizeof *frag_hdr
)) {
696 /* We only process the first fragment. */
697 if (frag_hdr
->ip6f_offlg
!= htons(0)) {
698 nw_frag
= FLOW_NW_FRAG_ANY
;
699 if ((frag_hdr
->ip6f_offlg
& IP6F_OFF_MASK
) != htons(0)) {
700 nw_frag
|= FLOW_NW_FRAG_LATER
;
701 nw_proto
= IPPROTO_FRAGMENT
;
708 if (dl_type
== htons(ETH_TYPE_ARP
) ||
709 dl_type
== htons(ETH_TYPE_RARP
)) {
710 struct eth_addr arp_buf
[2];
711 const struct arp_eth_header
*arp
= (const struct arp_eth_header
*)
712 data_try_pull(&data
, &size
, ARP_ETH_HEADER_LEN
);
714 if (OVS_LIKELY(arp
) && OVS_LIKELY(arp
->ar_hrd
== htons(1))
715 && OVS_LIKELY(arp
->ar_pro
== htons(ETH_TYPE_IP
))
716 && OVS_LIKELY(arp
->ar_hln
== ETH_ADDR_LEN
)
717 && OVS_LIKELY(arp
->ar_pln
== 4)) {
718 miniflow_push_be32(mf
, nw_src
,
719 get_16aligned_be32(&arp
->ar_spa
));
720 miniflow_push_be32(mf
, nw_dst
,
721 get_16aligned_be32(&arp
->ar_tpa
));
723 /* We only match on the lower 8 bits of the opcode. */
724 if (OVS_LIKELY(ntohs(arp
->ar_op
) <= 0xff)) {
725 miniflow_push_be32(mf
, ipv6_label
, 0); /* Pad with ARP. */
726 miniflow_push_be32(mf
, nw_frag
, htonl(ntohs(arp
->ar_op
)));
729 /* Must be adjacent. */
730 ASSERT_SEQUENTIAL(arp_sha
, arp_tha
);
732 arp_buf
[0] = arp
->ar_sha
;
733 arp_buf
[1] = arp
->ar_tha
;
734 miniflow_push_macs(mf
, arp_sha
, arp_buf
);
735 miniflow_pad_to_64(mf
, arp_tha
);
741 packet
->l4_ofs
= (char *)data
- l2
;
742 miniflow_push_be32(mf
, nw_frag
,
743 BYTES_TO_BE32(nw_frag
, nw_tos
, nw_ttl
, nw_proto
));
745 if (OVS_LIKELY(!(nw_frag
& FLOW_NW_FRAG_LATER
))) {
746 if (OVS_LIKELY(nw_proto
== IPPROTO_TCP
)) {
747 if (OVS_LIKELY(size
>= TCP_HEADER_LEN
)) {
748 const struct tcp_header
*tcp
= data
;
750 miniflow_push_be32(mf
, arp_tha
.ea
[2], 0);
751 miniflow_push_be32(mf
, tcp_flags
,
752 TCP_FLAGS_BE32(tcp
->tcp_ctl
));
753 miniflow_push_be16(mf
, tp_src
, tcp
->tcp_src
);
754 miniflow_push_be16(mf
, tp_dst
, tcp
->tcp_dst
);
755 miniflow_pad_to_64(mf
, tp_dst
);
757 } else if (OVS_LIKELY(nw_proto
== IPPROTO_UDP
)) {
758 if (OVS_LIKELY(size
>= UDP_HEADER_LEN
)) {
759 const struct udp_header
*udp
= data
;
761 miniflow_push_be16(mf
, tp_src
, udp
->udp_src
);
762 miniflow_push_be16(mf
, tp_dst
, udp
->udp_dst
);
763 miniflow_pad_to_64(mf
, tp_dst
);
765 } else if (OVS_LIKELY(nw_proto
== IPPROTO_SCTP
)) {
766 if (OVS_LIKELY(size
>= SCTP_HEADER_LEN
)) {
767 const struct sctp_header
*sctp
= data
;
769 miniflow_push_be16(mf
, tp_src
, sctp
->sctp_src
);
770 miniflow_push_be16(mf
, tp_dst
, sctp
->sctp_dst
);
771 miniflow_pad_to_64(mf
, tp_dst
);
773 } else if (OVS_LIKELY(nw_proto
== IPPROTO_ICMP
)) {
774 if (OVS_LIKELY(size
>= ICMP_HEADER_LEN
)) {
775 const struct icmp_header
*icmp
= data
;
777 miniflow_push_be16(mf
, tp_src
, htons(icmp
->icmp_type
));
778 miniflow_push_be16(mf
, tp_dst
, htons(icmp
->icmp_code
));
779 miniflow_pad_to_64(mf
, tp_dst
);
781 } else if (OVS_LIKELY(nw_proto
== IPPROTO_IGMP
)) {
782 if (OVS_LIKELY(size
>= IGMP_HEADER_LEN
)) {
783 const struct igmp_header
*igmp
= data
;
785 miniflow_push_be16(mf
, tp_src
, htons(igmp
->igmp_type
));
786 miniflow_push_be16(mf
, tp_dst
, htons(igmp
->igmp_code
));
787 miniflow_push_be32(mf
, igmp_group_ip4
,
788 get_16aligned_be32(&igmp
->group
));
790 } else if (OVS_LIKELY(nw_proto
== IPPROTO_ICMPV6
)) {
791 if (OVS_LIKELY(size
>= sizeof(struct icmp6_hdr
))) {
792 const struct in6_addr
*nd_target
= NULL
;
793 struct eth_addr arp_buf
[2] = { { { { 0 } } } };
794 const struct icmp6_hdr
*icmp
= data_pull(&data
, &size
,
796 parse_icmpv6(&data
, &size
, icmp
, &nd_target
, arp_buf
);
798 miniflow_push_words(mf
, nd_target
, nd_target
,
799 sizeof *nd_target
/ sizeof(uint64_t));
801 miniflow_push_macs(mf
, arp_sha
, arp_buf
);
802 miniflow_pad_to_64(mf
, arp_tha
);
803 miniflow_push_be16(mf
, tp_src
, htons(icmp
->icmp6_type
));
804 miniflow_push_be16(mf
, tp_dst
, htons(icmp
->icmp6_code
));
805 miniflow_pad_to_64(mf
, tp_dst
);
813 /* For every bit of a field that is wildcarded in 'wildcards', sets the
814 * corresponding bit in 'flow' to zero. */
816 flow_zero_wildcards(struct flow
*flow
, const struct flow_wildcards
*wildcards
)
818 uint64_t *flow_u64
= (uint64_t *) flow
;
819 const uint64_t *wc_u64
= (const uint64_t *) &wildcards
->masks
;
822 for (i
= 0; i
< FLOW_U64S
; i
++) {
823 flow_u64
[i
] &= wc_u64
[i
];
828 flow_unwildcard_tp_ports(const struct flow
*flow
, struct flow_wildcards
*wc
)
830 if (flow
->nw_proto
!= IPPROTO_ICMP
) {
831 memset(&wc
->masks
.tp_src
, 0xff, sizeof wc
->masks
.tp_src
);
832 memset(&wc
->masks
.tp_dst
, 0xff, sizeof wc
->masks
.tp_dst
);
834 wc
->masks
.tp_src
= htons(0xff);
835 wc
->masks
.tp_dst
= htons(0xff);
839 /* Initializes 'flow_metadata' with the metadata found in 'flow'. */
841 flow_get_metadata(const struct flow
*flow
, struct match
*flow_metadata
)
845 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 35);
847 match_init_catchall(flow_metadata
);
848 if (flow
->tunnel
.tun_id
!= htonll(0)) {
849 match_set_tun_id(flow_metadata
, flow
->tunnel
.tun_id
);
851 if (flow
->tunnel
.flags
& FLOW_TNL_PUB_F_MASK
) {
852 match_set_tun_flags(flow_metadata
,
853 flow
->tunnel
.flags
& FLOW_TNL_PUB_F_MASK
);
855 if (flow
->tunnel
.ip_src
) {
856 match_set_tun_src(flow_metadata
, flow
->tunnel
.ip_src
);
858 if (flow
->tunnel
.ip_dst
) {
859 match_set_tun_dst(flow_metadata
, flow
->tunnel
.ip_dst
);
861 if (ipv6_addr_is_set(&flow
->tunnel
.ipv6_src
)) {
862 match_set_tun_ipv6_src(flow_metadata
, &flow
->tunnel
.ipv6_src
);
864 if (ipv6_addr_is_set(&flow
->tunnel
.ipv6_dst
)) {
865 match_set_tun_ipv6_dst(flow_metadata
, &flow
->tunnel
.ipv6_dst
);
867 if (flow
->tunnel
.gbp_id
!= htons(0)) {
868 match_set_tun_gbp_id(flow_metadata
, flow
->tunnel
.gbp_id
);
870 if (flow
->tunnel
.gbp_flags
) {
871 match_set_tun_gbp_flags(flow_metadata
, flow
->tunnel
.gbp_flags
);
873 tun_metadata_get_fmd(&flow
->tunnel
, flow_metadata
);
874 if (flow
->metadata
!= htonll(0)) {
875 match_set_metadata(flow_metadata
, flow
->metadata
);
878 for (i
= 0; i
< FLOW_N_REGS
; i
++) {
880 match_set_reg(flow_metadata
, i
, flow
->regs
[i
]);
884 if (flow
->pkt_mark
!= 0) {
885 match_set_pkt_mark(flow_metadata
, flow
->pkt_mark
);
888 match_set_in_port(flow_metadata
, flow
->in_port
.ofp_port
);
889 if (flow
->ct_state
!= 0) {
890 match_set_ct_state(flow_metadata
, flow
->ct_state
);
892 if (flow
->ct_zone
!= 0) {
893 match_set_ct_zone(flow_metadata
, flow
->ct_zone
);
895 if (flow
->ct_mark
!= 0) {
896 match_set_ct_mark(flow_metadata
, flow
->ct_mark
);
898 if (!ovs_u128_is_zero(flow
->ct_label
)) {
899 match_set_ct_label(flow_metadata
, flow
->ct_label
);
903 const char *ct_state_to_string(uint32_t state
)
928 flow_to_string(const struct flow
*flow
)
930 struct ds ds
= DS_EMPTY_INITIALIZER
;
931 flow_format(&ds
, flow
);
936 flow_tun_flag_to_string(uint32_t flags
)
939 case FLOW_TNL_F_DONT_FRAGMENT
:
941 case FLOW_TNL_F_CSUM
:
953 format_flags(struct ds
*ds
, const char *(*bit_to_string
)(uint32_t),
954 uint32_t flags
, char del
)
959 ds_put_char(ds
, '0');
963 uint32_t bit
= rightmost_1bit(flags
);
966 s
= bit_to_string(bit
);
968 ds_put_format(ds
, "%s%c", s
, del
);
977 ds_put_format(ds
, "0x%"PRIx32
"%c", bad
, del
);
983 format_flags_masked(struct ds
*ds
, const char *name
,
984 const char *(*bit_to_string
)(uint32_t), uint32_t flags
,
985 uint32_t mask
, uint32_t max_mask
)
988 ds_put_format(ds
, "%s%s=%s", colors
.param
, name
, colors
.end
);
991 if (mask
== max_mask
) {
992 format_flags(ds
, bit_to_string
, flags
, '|');
997 ds_put_cstr(ds
, "0/0");
1002 uint32_t bit
= rightmost_1bit(mask
);
1003 const char *s
= bit_to_string(bit
);
1005 ds_put_format(ds
, "%s%s", (flags
& bit
) ? "+" : "-",
1006 s
? s
: "[Unknown]");
1011 /* Scans a string 's' of flags to determine their numerical value and
1012 * returns the number of characters parsed using 'bit_to_string' to
1013 * lookup flag names. Scanning continues until the character 'end' is
1016 * In the event of a failure, a negative error code will be returned. In
1017 * addition, if 'res_string' is non-NULL then a descriptive string will
1018 * be returned incorporating the identifying string 'field_name'. This
1019 * error string must be freed by the caller.
1021 * Upon success, the flag values will be stored in 'res_flags' and
1022 * optionally 'res_mask', if it is non-NULL (if it is NULL then any masks
1023 * present in the original string will be considered an error). The
1024 * caller may restrict the acceptable set of values through the mask
1027 parse_flags(const char *s
, const char *(*bit_to_string
)(uint32_t),
1028 char end
, const char *field_name
, char **res_string
,
1029 uint32_t *res_flags
, uint32_t allowed
, uint32_t *res_mask
)
1031 uint32_t result
= 0;
1034 /* Parse masked flags in numeric format? */
1035 if (res_mask
&& ovs_scan(s
, "%"SCNi32
"/%"SCNi32
"%n",
1036 res_flags
, res_mask
, &n
) && n
> 0) {
1037 if (*res_flags
& ~allowed
|| *res_mask
& ~allowed
) {
1045 if (res_mask
&& (*s
== '+' || *s
== '-')) {
1046 uint32_t flags
= 0, mask
= 0;
1048 /* Parse masked flags. */
1049 while (s
[0] != end
) {
1056 } else if (s
[0] == '-') {
1060 *res_string
= xasprintf("%s: %s must be preceded by '+' "
1061 "(for SET) or '-' (NOT SET)", s
,
1069 for (bit
= 1; bit
; bit
<<= 1) {
1070 const char *fname
= bit_to_string(bit
);
1076 len
= strlen(fname
);
1077 if (strncmp(s
, fname
, len
) ||
1078 (s
[len
] != '+' && s
[len
] != '-' && s
[len
] != end
)) {
1083 /* bit already set. */
1085 *res_string
= xasprintf("%s: Each %s flag can be "
1086 "specified only once", s
,
1091 if (!(bit
& allowed
)) {
1113 /* Parse unmasked flags. If a flag is present, it is set, otherwise
1115 while (s
[n
] != end
) {
1116 unsigned long long int flags
;
1120 if (ovs_scan(&s
[n
], "%lli%n", &flags
, &n0
)) {
1121 if (flags
& ~allowed
) {
1124 n
+= n0
+ (s
[n
+ n0
] == '|');
1129 for (bit
= 1; bit
; bit
<<= 1) {
1130 const char *name
= bit_to_string(bit
);
1138 if (!strncmp(s
+ n
, name
, len
) &&
1139 (s
[n
+ len
] == '|' || s
[n
+ len
] == end
)) {
1140 if (!(bit
& allowed
)) {
1144 n
+= len
+ (s
[n
+ len
] == '|');
1154 *res_flags
= result
;
1156 *res_mask
= UINT32_MAX
;
1165 *res_string
= xasprintf("%s: unknown %s flag(s)", s
, field_name
);
1171 flow_format(struct ds
*ds
, const struct flow
*flow
)
1174 struct flow_wildcards
*wc
= &match
.wc
;
1176 match_wc_init(&match
, flow
);
1178 /* As this function is most often used for formatting a packet in a
1179 * packet-in message, skip formatting the packet context fields that are
1180 * all-zeroes to make the print-out easier on the eyes. This means that a
1181 * missing context field implies a zero value for that field. This is
1182 * similar to OpenFlow encoding of these fields, as the specification
1183 * states that all-zeroes context fields should not be encoded in the
1184 * packet-in messages. */
1185 if (!flow
->in_port
.ofp_port
) {
1186 WC_UNMASK_FIELD(wc
, in_port
);
1188 if (!flow
->skb_priority
) {
1189 WC_UNMASK_FIELD(wc
, skb_priority
);
1191 if (!flow
->pkt_mark
) {
1192 WC_UNMASK_FIELD(wc
, pkt_mark
);
1194 if (!flow
->recirc_id
) {
1195 WC_UNMASK_FIELD(wc
, recirc_id
);
1197 if (!flow
->dp_hash
) {
1198 WC_UNMASK_FIELD(wc
, dp_hash
);
1200 if (!flow
->ct_state
) {
1201 WC_UNMASK_FIELD(wc
, ct_state
);
1203 if (!flow
->ct_zone
) {
1204 WC_UNMASK_FIELD(wc
, ct_zone
);
1206 if (!flow
->ct_mark
) {
1207 WC_UNMASK_FIELD(wc
, ct_mark
);
1209 if (ovs_u128_is_zero(flow
->ct_label
)) {
1210 WC_UNMASK_FIELD(wc
, ct_label
);
1212 for (int i
= 0; i
< FLOW_N_REGS
; i
++) {
1213 if (!flow
->regs
[i
]) {
1214 WC_UNMASK_FIELD(wc
, regs
[i
]);
1217 if (!flow
->metadata
) {
1218 WC_UNMASK_FIELD(wc
, metadata
);
1221 match_format(&match
, ds
, OFP_DEFAULT_PRIORITY
);
1225 flow_print(FILE *stream
, const struct flow
*flow
)
1227 char *s
= flow_to_string(flow
);
1232 /* flow_wildcards functions. */
1234 /* Initializes 'wc' as a set of wildcards that matches every packet. */
1236 flow_wildcards_init_catchall(struct flow_wildcards
*wc
)
1238 memset(&wc
->masks
, 0, sizeof wc
->masks
);
1241 /* Converts a flow into flow wildcards. It sets the wildcard masks based on
1242 * the packet headers extracted to 'flow'. It will not set the mask for fields
1243 * that do not make sense for the packet type. OpenFlow-only metadata is
1244 * wildcarded, but other metadata is unconditionally exact-matched. */
1245 void flow_wildcards_init_for_packet(struct flow_wildcards
*wc
,
1246 const struct flow
*flow
)
1248 memset(&wc
->masks
, 0x0, sizeof wc
->masks
);
1250 /* Update this function whenever struct flow changes. */
1251 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 35);
1253 if (flow_tnl_dst_is_set(&flow
->tunnel
)) {
1254 if (flow
->tunnel
.flags
& FLOW_TNL_F_KEY
) {
1255 WC_MASK_FIELD(wc
, tunnel
.tun_id
);
1257 WC_MASK_FIELD(wc
, tunnel
.ip_src
);
1258 WC_MASK_FIELD(wc
, tunnel
.ip_dst
);
1259 WC_MASK_FIELD(wc
, tunnel
.ipv6_src
);
1260 WC_MASK_FIELD(wc
, tunnel
.ipv6_dst
);
1261 WC_MASK_FIELD(wc
, tunnel
.flags
);
1262 WC_MASK_FIELD(wc
, tunnel
.ip_tos
);
1263 WC_MASK_FIELD(wc
, tunnel
.ip_ttl
);
1264 WC_MASK_FIELD(wc
, tunnel
.tp_src
);
1265 WC_MASK_FIELD(wc
, tunnel
.tp_dst
);
1266 WC_MASK_FIELD(wc
, tunnel
.gbp_id
);
1267 WC_MASK_FIELD(wc
, tunnel
.gbp_flags
);
1269 if (!(flow
->tunnel
.flags
& FLOW_TNL_F_UDPIF
)) {
1270 if (flow
->tunnel
.metadata
.present
.map
) {
1271 wc
->masks
.tunnel
.metadata
.present
.map
=
1272 flow
->tunnel
.metadata
.present
.map
;
1273 WC_MASK_FIELD(wc
, tunnel
.metadata
.opts
.u8
);
1276 WC_MASK_FIELD(wc
, tunnel
.metadata
.present
.len
);
1277 memset(wc
->masks
.tunnel
.metadata
.opts
.gnv
, 0xff,
1278 flow
->tunnel
.metadata
.present
.len
);
1280 } else if (flow
->tunnel
.tun_id
) {
1281 WC_MASK_FIELD(wc
, tunnel
.tun_id
);
1284 /* metadata, regs, and conj_id wildcarded. */
1286 WC_MASK_FIELD(wc
, skb_priority
);
1287 WC_MASK_FIELD(wc
, pkt_mark
);
1288 WC_MASK_FIELD(wc
, ct_state
);
1289 WC_MASK_FIELD(wc
, ct_zone
);
1290 WC_MASK_FIELD(wc
, ct_mark
);
1291 WC_MASK_FIELD(wc
, ct_label
);
1292 WC_MASK_FIELD(wc
, recirc_id
);
1293 WC_MASK_FIELD(wc
, dp_hash
);
1294 WC_MASK_FIELD(wc
, in_port
);
1296 /* actset_output wildcarded. */
1298 WC_MASK_FIELD(wc
, dl_dst
);
1299 WC_MASK_FIELD(wc
, dl_src
);
1300 WC_MASK_FIELD(wc
, dl_type
);
1301 WC_MASK_FIELD(wc
, vlan_tci
);
1303 if (flow
->dl_type
== htons(ETH_TYPE_IP
)) {
1304 WC_MASK_FIELD(wc
, nw_src
);
1305 WC_MASK_FIELD(wc
, nw_dst
);
1306 } else if (flow
->dl_type
== htons(ETH_TYPE_IPV6
)) {
1307 WC_MASK_FIELD(wc
, ipv6_src
);
1308 WC_MASK_FIELD(wc
, ipv6_dst
);
1309 WC_MASK_FIELD(wc
, ipv6_label
);
1310 } else if (flow
->dl_type
== htons(ETH_TYPE_ARP
) ||
1311 flow
->dl_type
== htons(ETH_TYPE_RARP
)) {
1312 WC_MASK_FIELD(wc
, nw_src
);
1313 WC_MASK_FIELD(wc
, nw_dst
);
1314 WC_MASK_FIELD(wc
, nw_proto
);
1315 WC_MASK_FIELD(wc
, arp_sha
);
1316 WC_MASK_FIELD(wc
, arp_tha
);
1318 } else if (eth_type_mpls(flow
->dl_type
)) {
1319 for (int i
= 0; i
< FLOW_MAX_MPLS_LABELS
; i
++) {
1320 WC_MASK_FIELD(wc
, mpls_lse
[i
]);
1321 if (flow
->mpls_lse
[i
] & htonl(MPLS_BOS_MASK
)) {
1327 return; /* Unknown ethertype. */
1331 WC_MASK_FIELD(wc
, nw_frag
);
1332 WC_MASK_FIELD(wc
, nw_tos
);
1333 WC_MASK_FIELD(wc
, nw_ttl
);
1334 WC_MASK_FIELD(wc
, nw_proto
);
1336 /* No transport layer header in later fragments. */
1337 if (!(flow
->nw_frag
& FLOW_NW_FRAG_LATER
) &&
1338 (flow
->nw_proto
== IPPROTO_ICMP
||
1339 flow
->nw_proto
== IPPROTO_ICMPV6
||
1340 flow
->nw_proto
== IPPROTO_TCP
||
1341 flow
->nw_proto
== IPPROTO_UDP
||
1342 flow
->nw_proto
== IPPROTO_SCTP
||
1343 flow
->nw_proto
== IPPROTO_IGMP
)) {
1344 WC_MASK_FIELD(wc
, tp_src
);
1345 WC_MASK_FIELD(wc
, tp_dst
);
1347 if (flow
->nw_proto
== IPPROTO_TCP
) {
1348 WC_MASK_FIELD(wc
, tcp_flags
);
1349 } else if (flow
->nw_proto
== IPPROTO_ICMPV6
) {
1350 WC_MASK_FIELD(wc
, arp_sha
);
1351 WC_MASK_FIELD(wc
, arp_tha
);
1352 WC_MASK_FIELD(wc
, nd_target
);
1353 } else if (flow
->nw_proto
== IPPROTO_IGMP
) {
1354 WC_MASK_FIELD(wc
, igmp_group_ip4
);
1359 /* Return a map of possible fields for a packet of the same type as 'flow'.
1360 * Including extra bits in the returned mask is not wrong, it is just less
1363 * This is a less precise version of flow_wildcards_init_for_packet() above. */
1365 flow_wc_map(const struct flow
*flow
, struct flowmap
*map
)
1367 /* Update this function whenever struct flow changes. */
1368 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 35);
1372 if (flow_tnl_dst_is_set(&flow
->tunnel
)) {
1373 FLOWMAP_SET__(map
, tunnel
, offsetof(struct flow_tnl
, metadata
));
1374 if (!(flow
->tunnel
.flags
& FLOW_TNL_F_UDPIF
)) {
1375 if (flow
->tunnel
.metadata
.present
.map
) {
1376 FLOWMAP_SET(map
, tunnel
.metadata
);
1379 FLOWMAP_SET(map
, tunnel
.metadata
.present
.len
);
1380 FLOWMAP_SET__(map
, tunnel
.metadata
.opts
.gnv
,
1381 flow
->tunnel
.metadata
.present
.len
);
1385 /* Metadata fields that can appear on packet input. */
1386 FLOWMAP_SET(map
, skb_priority
);
1387 FLOWMAP_SET(map
, pkt_mark
);
1388 FLOWMAP_SET(map
, recirc_id
);
1389 FLOWMAP_SET(map
, dp_hash
);
1390 FLOWMAP_SET(map
, in_port
);
1391 FLOWMAP_SET(map
, dl_dst
);
1392 FLOWMAP_SET(map
, dl_src
);
1393 FLOWMAP_SET(map
, dl_type
);
1394 FLOWMAP_SET(map
, vlan_tci
);
1395 FLOWMAP_SET(map
, ct_state
);
1396 FLOWMAP_SET(map
, ct_zone
);
1397 FLOWMAP_SET(map
, ct_mark
);
1398 FLOWMAP_SET(map
, ct_label
);
1400 /* Ethertype-dependent fields. */
1401 if (OVS_LIKELY(flow
->dl_type
== htons(ETH_TYPE_IP
))) {
1402 FLOWMAP_SET(map
, nw_src
);
1403 FLOWMAP_SET(map
, nw_dst
);
1404 FLOWMAP_SET(map
, nw_proto
);
1405 FLOWMAP_SET(map
, nw_frag
);
1406 FLOWMAP_SET(map
, nw_tos
);
1407 FLOWMAP_SET(map
, nw_ttl
);
1408 FLOWMAP_SET(map
, tp_src
);
1409 FLOWMAP_SET(map
, tp_dst
);
1411 if (OVS_UNLIKELY(flow
->nw_proto
== IPPROTO_IGMP
)) {
1412 FLOWMAP_SET(map
, igmp_group_ip4
);
1414 FLOWMAP_SET(map
, tcp_flags
);
1416 } else if (flow
->dl_type
== htons(ETH_TYPE_IPV6
)) {
1417 FLOWMAP_SET(map
, ipv6_src
);
1418 FLOWMAP_SET(map
, ipv6_dst
);
1419 FLOWMAP_SET(map
, ipv6_label
);
1420 FLOWMAP_SET(map
, nw_proto
);
1421 FLOWMAP_SET(map
, nw_frag
);
1422 FLOWMAP_SET(map
, nw_tos
);
1423 FLOWMAP_SET(map
, nw_ttl
);
1424 FLOWMAP_SET(map
, tp_src
);
1425 FLOWMAP_SET(map
, tp_dst
);
1427 if (OVS_UNLIKELY(flow
->nw_proto
== IPPROTO_ICMPV6
)) {
1428 FLOWMAP_SET(map
, nd_target
);
1429 FLOWMAP_SET(map
, arp_sha
);
1430 FLOWMAP_SET(map
, arp_tha
);
1432 FLOWMAP_SET(map
, tcp_flags
);
1434 } else if (eth_type_mpls(flow
->dl_type
)) {
1435 FLOWMAP_SET(map
, mpls_lse
);
1436 } else if (flow
->dl_type
== htons(ETH_TYPE_ARP
) ||
1437 flow
->dl_type
== htons(ETH_TYPE_RARP
)) {
1438 FLOWMAP_SET(map
, nw_src
);
1439 FLOWMAP_SET(map
, nw_dst
);
1440 FLOWMAP_SET(map
, nw_proto
);
1441 FLOWMAP_SET(map
, arp_sha
);
1442 FLOWMAP_SET(map
, arp_tha
);
1446 /* Clear the metadata and register wildcard masks. They are not packet
1449 flow_wildcards_clear_non_packet_fields(struct flow_wildcards
*wc
)
1451 /* Update this function whenever struct flow changes. */
1452 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 35);
1454 memset(&wc
->masks
.metadata
, 0, sizeof wc
->masks
.metadata
);
1455 memset(&wc
->masks
.regs
, 0, sizeof wc
->masks
.regs
);
1456 wc
->masks
.actset_output
= 0;
1457 wc
->masks
.conj_id
= 0;
1460 /* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
1463 flow_wildcards_is_catchall(const struct flow_wildcards
*wc
)
1465 const uint64_t *wc_u64
= (const uint64_t *) &wc
->masks
;
1468 for (i
= 0; i
< FLOW_U64S
; i
++) {
1476 /* Sets 'dst' as the bitwise AND of wildcards in 'src1' and 'src2'.
1477 * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded
1478 * in 'src1' or 'src2' or both. */
1480 flow_wildcards_and(struct flow_wildcards
*dst
,
1481 const struct flow_wildcards
*src1
,
1482 const struct flow_wildcards
*src2
)
1484 uint64_t *dst_u64
= (uint64_t *) &dst
->masks
;
1485 const uint64_t *src1_u64
= (const uint64_t *) &src1
->masks
;
1486 const uint64_t *src2_u64
= (const uint64_t *) &src2
->masks
;
1489 for (i
= 0; i
< FLOW_U64S
; i
++) {
1490 dst_u64
[i
] = src1_u64
[i
] & src2_u64
[i
];
1494 /* Sets 'dst' as the bitwise OR of wildcards in 'src1' and 'src2'. That
1495 * is, a bit or a field is wildcarded in 'dst' if it is neither
1496 * wildcarded in 'src1' nor 'src2'. */
1498 flow_wildcards_or(struct flow_wildcards
*dst
,
1499 const struct flow_wildcards
*src1
,
1500 const struct flow_wildcards
*src2
)
1502 uint64_t *dst_u64
= (uint64_t *) &dst
->masks
;
1503 const uint64_t *src1_u64
= (const uint64_t *) &src1
->masks
;
1504 const uint64_t *src2_u64
= (const uint64_t *) &src2
->masks
;
1507 for (i
= 0; i
< FLOW_U64S
; i
++) {
1508 dst_u64
[i
] = src1_u64
[i
] | src2_u64
[i
];
1512 /* Returns a hash of the wildcards in 'wc'. */
1514 flow_wildcards_hash(const struct flow_wildcards
*wc
, uint32_t basis
)
1516 return flow_hash(&wc
->masks
, basis
);
1519 /* Returns true if 'a' and 'b' represent the same wildcards, false if they are
1522 flow_wildcards_equal(const struct flow_wildcards
*a
,
1523 const struct flow_wildcards
*b
)
1525 return flow_equal(&a
->masks
, &b
->masks
);
1528 /* Returns true if at least one bit or field is wildcarded in 'a' but not in
1529 * 'b', false otherwise. */
1531 flow_wildcards_has_extra(const struct flow_wildcards
*a
,
1532 const struct flow_wildcards
*b
)
1534 const uint64_t *a_u64
= (const uint64_t *) &a
->masks
;
1535 const uint64_t *b_u64
= (const uint64_t *) &b
->masks
;
1538 for (i
= 0; i
< FLOW_U64S
; i
++) {
1539 if ((a_u64
[i
] & b_u64
[i
]) != b_u64
[i
]) {
1546 /* Returns true if 'a' and 'b' are equal, except that 0-bits (wildcarded bits)
1547 * in 'wc' do not need to be equal in 'a' and 'b'. */
1549 flow_equal_except(const struct flow
*a
, const struct flow
*b
,
1550 const struct flow_wildcards
*wc
)
1552 const uint64_t *a_u64
= (const uint64_t *) a
;
1553 const uint64_t *b_u64
= (const uint64_t *) b
;
1554 const uint64_t *wc_u64
= (const uint64_t *) &wc
->masks
;
1557 for (i
= 0; i
< FLOW_U64S
; i
++) {
1558 if ((a_u64
[i
] ^ b_u64
[i
]) & wc_u64
[i
]) {
1565 /* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
1566 * (A 0-bit indicates a wildcard bit.) */
1568 flow_wildcards_set_reg_mask(struct flow_wildcards
*wc
, int idx
, uint32_t mask
)
1570 wc
->masks
.regs
[idx
] = mask
;
1573 /* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
1574 * (A 0-bit indicates a wildcard bit.) */
1576 flow_wildcards_set_xreg_mask(struct flow_wildcards
*wc
, int idx
, uint64_t mask
)
1578 flow_set_xreg(&wc
->masks
, idx
, mask
);
1581 /* Calculates the 5-tuple hash from the given miniflow.
1582 * This returns the same value as flow_hash_5tuple for the corresponding
1585 miniflow_hash_5tuple(const struct miniflow
*flow
, uint32_t basis
)
1587 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 35);
1588 uint32_t hash
= basis
;
1591 ovs_be16 dl_type
= MINIFLOW_GET_BE16(flow
, dl_type
);
1594 if (dl_type
== htons(ETH_TYPE_IPV6
)) {
1595 struct flowmap map
= FLOWMAP_EMPTY_INITIALIZER
;
1598 FLOWMAP_SET(&map
, ipv6_src
);
1599 FLOWMAP_SET(&map
, ipv6_dst
);
1601 MINIFLOW_FOR_EACH_IN_FLOWMAP(value
, flow
, map
) {
1602 hash
= hash_add64(hash
, value
);
1604 } else if (dl_type
== htons(ETH_TYPE_IP
)
1605 || dl_type
== htons(ETH_TYPE_ARP
)) {
1606 hash
= hash_add(hash
, MINIFLOW_GET_U32(flow
, nw_src
));
1607 hash
= hash_add(hash
, MINIFLOW_GET_U32(flow
, nw_dst
));
1612 nw_proto
= MINIFLOW_GET_U8(flow
, nw_proto
);
1613 hash
= hash_add(hash
, nw_proto
);
1614 if (nw_proto
!= IPPROTO_TCP
&& nw_proto
!= IPPROTO_UDP
1615 && nw_proto
!= IPPROTO_SCTP
&& nw_proto
!= IPPROTO_ICMP
1616 && nw_proto
!= IPPROTO_ICMPV6
) {
1620 /* Add both ports at once. */
1621 hash
= hash_add(hash
, MINIFLOW_GET_U32(flow
, tp_src
));
1624 return hash_finish(hash
, 42);
1627 ASSERT_SEQUENTIAL_SAME_WORD(tp_src
, tp_dst
);
1628 ASSERT_SEQUENTIAL(ipv6_src
, ipv6_dst
);
1630 /* Calculates the 5-tuple hash from the given flow. */
1632 flow_hash_5tuple(const struct flow
*flow
, uint32_t basis
)
1634 BUILD_ASSERT_DECL(FLOW_WC_SEQ
== 35);
1635 uint32_t hash
= basis
;
1639 if (flow
->dl_type
== htons(ETH_TYPE_IPV6
)) {
1640 const uint64_t *flow_u64
= (const uint64_t *)flow
;
1641 int ofs
= offsetof(struct flow
, ipv6_src
) / 8;
1642 int end
= ofs
+ 2 * sizeof flow
->ipv6_src
/ 8;
1644 for (;ofs
< end
; ofs
++) {
1645 hash
= hash_add64(hash
, flow_u64
[ofs
]);
1647 } else if (flow
->dl_type
== htons(ETH_TYPE_IP
)
1648 || flow
->dl_type
== htons(ETH_TYPE_ARP
)) {
1649 hash
= hash_add(hash
, (OVS_FORCE
uint32_t) flow
->nw_src
);
1650 hash
= hash_add(hash
, (OVS_FORCE
uint32_t) flow
->nw_dst
);
1655 hash
= hash_add(hash
, flow
->nw_proto
);
1656 if (flow
->nw_proto
!= IPPROTO_TCP
&& flow
->nw_proto
!= IPPROTO_UDP
1657 && flow
->nw_proto
!= IPPROTO_SCTP
&& flow
->nw_proto
!= IPPROTO_ICMP
1658 && flow
->nw_proto
!= IPPROTO_ICMPV6
) {
1662 /* Add both ports at once. */
1663 hash
= hash_add(hash
,
1664 ((const uint32_t *)flow
)[offsetof(struct flow
, tp_src
)
1665 / sizeof(uint32_t)]);
1668 return hash_finish(hash
, 42); /* Arbitrary number. */
1671 /* Hashes 'flow' based on its L2 through L4 protocol information. */
1673 flow_hash_symmetric_l4(const struct flow
*flow
, uint32_t basis
)
1678 struct in6_addr ipv6_addr
;
1683 struct eth_addr eth_addr
;
1689 memset(&fields
, 0, sizeof fields
);
1690 for (i
= 0; i
< ARRAY_SIZE(fields
.eth_addr
.be16
); i
++) {
1691 fields
.eth_addr
.be16
[i
] = flow
->dl_src
.be16
[i
] ^ flow
->dl_dst
.be16
[i
];
1693 fields
.vlan_tci
= flow
->vlan_tci
& htons(VLAN_VID_MASK
);
1694 fields
.eth_type
= flow
->dl_type
;
1696 /* UDP source and destination port are not taken into account because they
1697 * will not necessarily be symmetric in a bidirectional flow. */
1698 if (fields
.eth_type
== htons(ETH_TYPE_IP
)) {
1699 fields
.ipv4_addr
= flow
->nw_src
^ flow
->nw_dst
;
1700 fields
.ip_proto
= flow
->nw_proto
;
1701 if (fields
.ip_proto
== IPPROTO_TCP
|| fields
.ip_proto
== IPPROTO_SCTP
) {
1702 fields
.tp_port
= flow
->tp_src
^ flow
->tp_dst
;
1704 } else if (fields
.eth_type
== htons(ETH_TYPE_IPV6
)) {
1705 const uint8_t *a
= &flow
->ipv6_src
.s6_addr
[0];
1706 const uint8_t *b
= &flow
->ipv6_dst
.s6_addr
[0];
1707 uint8_t *ipv6_addr
= &fields
.ipv6_addr
.s6_addr
[0];
1709 for (i
=0; i
<16; i
++) {
1710 ipv6_addr
[i
] = a
[i
] ^ b
[i
];
1712 fields
.ip_proto
= flow
->nw_proto
;
1713 if (fields
.ip_proto
== IPPROTO_TCP
|| fields
.ip_proto
== IPPROTO_SCTP
) {
1714 fields
.tp_port
= flow
->tp_src
^ flow
->tp_dst
;
1717 return jhash_bytes(&fields
, sizeof fields
, basis
);
1720 /* Hashes 'flow' based on its L3 through L4 protocol information */
1722 flow_hash_symmetric_l3l4(const struct flow
*flow
, uint32_t basis
,
1725 uint32_t hash
= basis
;
1727 /* UDP source and destination port are also taken into account. */
1728 if (flow
->dl_type
== htons(ETH_TYPE_IP
)) {
1729 hash
= hash_add(hash
,
1730 (OVS_FORCE
uint32_t) (flow
->nw_src
^ flow
->nw_dst
));
1731 } else if (flow
->dl_type
== htons(ETH_TYPE_IPV6
)) {
1732 /* IPv6 addresses are 64-bit aligned inside struct flow. */
1733 const uint64_t *a
= ALIGNED_CAST(uint64_t *, flow
->ipv6_src
.s6_addr
);
1734 const uint64_t *b
= ALIGNED_CAST(uint64_t *, flow
->ipv6_dst
.s6_addr
);
1736 for (int i
= 0; i
< 4; i
++) {
1737 hash
= hash_add64(hash
, a
[i
] ^ b
[i
]);
1740 /* Cannot hash non-IP flows */
1744 hash
= hash_add(hash
, flow
->nw_proto
);
1745 if (flow
->nw_proto
== IPPROTO_TCP
|| flow
->nw_proto
== IPPROTO_SCTP
||
1746 (inc_udp_ports
&& flow
->nw_proto
== IPPROTO_UDP
)) {
1747 hash
= hash_add(hash
,
1748 (OVS_FORCE
uint16_t) (flow
->tp_src
^ flow
->tp_dst
));
1751 return hash_finish(hash
, basis
);
1754 /* Initialize a flow with random fields that matter for nx_hash_fields. */
1756 flow_random_hash_fields(struct flow
*flow
)
1758 uint16_t rnd
= random_uint16();
1760 /* Initialize to all zeros. */
1761 memset(flow
, 0, sizeof *flow
);
1763 eth_addr_random(&flow
->dl_src
);
1764 eth_addr_random(&flow
->dl_dst
);
1766 flow
->vlan_tci
= (OVS_FORCE ovs_be16
) (random_uint16() & VLAN_VID_MASK
);
1768 /* Make most of the random flows IPv4, some IPv6, and rest random. */
1769 flow
->dl_type
= rnd
< 0x8000 ? htons(ETH_TYPE_IP
) :
1770 rnd
< 0xc000 ? htons(ETH_TYPE_IPV6
) : (OVS_FORCE ovs_be16
)rnd
;
1772 if (dl_type_is_ip_any(flow
->dl_type
)) {
1773 if (flow
->dl_type
== htons(ETH_TYPE_IP
)) {
1774 flow
->nw_src
= (OVS_FORCE ovs_be32
)random_uint32();
1775 flow
->nw_dst
= (OVS_FORCE ovs_be32
)random_uint32();
1777 random_bytes(&flow
->ipv6_src
, sizeof flow
->ipv6_src
);
1778 random_bytes(&flow
->ipv6_dst
, sizeof flow
->ipv6_dst
);
1780 /* Make most of IP flows TCP, some UDP or SCTP, and rest random. */
1781 rnd
= random_uint16();
1782 flow
->nw_proto
= rnd
< 0x8000 ? IPPROTO_TCP
:
1783 rnd
< 0xc000 ? IPPROTO_UDP
:
1784 rnd
< 0xd000 ? IPPROTO_SCTP
: (uint8_t)rnd
;
1785 if (flow
->nw_proto
== IPPROTO_TCP
||
1786 flow
->nw_proto
== IPPROTO_UDP
||
1787 flow
->nw_proto
== IPPROTO_SCTP
) {
1788 flow
->tp_src
= (OVS_FORCE ovs_be16
)random_uint16();
1789 flow
->tp_dst
= (OVS_FORCE ovs_be16
)random_uint16();
1794 /* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
1796 flow_mask_hash_fields(const struct flow
*flow
, struct flow_wildcards
*wc
,
1797 enum nx_hash_fields fields
)
1800 case NX_HASH_FIELDS_ETH_SRC
:
1801 memset(&wc
->masks
.dl_src
, 0xff, sizeof wc
->masks
.dl_src
);
1804 case NX_HASH_FIELDS_SYMMETRIC_L4
:
1805 memset(&wc
->masks
.dl_src
, 0xff, sizeof wc
->masks
.dl_src
);
1806 memset(&wc
->masks
.dl_dst
, 0xff, sizeof wc
->masks
.dl_dst
);
1807 if (flow
->dl_type
== htons(ETH_TYPE_IP
)) {
1808 memset(&wc
->masks
.nw_src
, 0xff, sizeof wc
->masks
.nw_src
);
1809 memset(&wc
->masks
.nw_dst
, 0xff, sizeof wc
->masks
.nw_dst
);
1810 } else if (flow
->dl_type
== htons(ETH_TYPE_IPV6
)) {
1811 memset(&wc
->masks
.ipv6_src
, 0xff, sizeof wc
->masks
.ipv6_src
);
1812 memset(&wc
->masks
.ipv6_dst
, 0xff, sizeof wc
->masks
.ipv6_dst
);
1814 if (is_ip_any(flow
)) {
1815 memset(&wc
->masks
.nw_proto
, 0xff, sizeof wc
->masks
.nw_proto
);
1816 flow_unwildcard_tp_ports(flow
, wc
);
1818 wc
->masks
.vlan_tci
|= htons(VLAN_VID_MASK
| VLAN_CFI
);
1821 case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP
:
1822 if (is_ip_any(flow
) && flow
->nw_proto
== IPPROTO_UDP
) {
1823 memset(&wc
->masks
.tp_src
, 0xff, sizeof wc
->masks
.tp_src
);
1824 memset(&wc
->masks
.tp_dst
, 0xff, sizeof wc
->masks
.tp_dst
);
1827 case NX_HASH_FIELDS_SYMMETRIC_L3L4
:
1828 if (flow
->dl_type
== htons(ETH_TYPE_IP
)) {
1829 memset(&wc
->masks
.nw_src
, 0xff, sizeof wc
->masks
.nw_src
);
1830 memset(&wc
->masks
.nw_dst
, 0xff, sizeof wc
->masks
.nw_dst
);
1831 } else if (flow
->dl_type
== htons(ETH_TYPE_IPV6
)) {
1832 memset(&wc
->masks
.ipv6_src
, 0xff, sizeof wc
->masks
.ipv6_src
);
1833 memset(&wc
->masks
.ipv6_dst
, 0xff, sizeof wc
->masks
.ipv6_dst
);
1835 break; /* non-IP flow */
1838 memset(&wc
->masks
.nw_proto
, 0xff, sizeof wc
->masks
.nw_proto
);
1839 if (flow
->nw_proto
== IPPROTO_TCP
|| flow
->nw_proto
== IPPROTO_SCTP
) {
1840 memset(&wc
->masks
.tp_src
, 0xff, sizeof wc
->masks
.tp_src
);
1841 memset(&wc
->masks
.tp_dst
, 0xff, sizeof wc
->masks
.tp_dst
);
1850 /* Hashes the portions of 'flow' designated by 'fields'. */
1852 flow_hash_fields(const struct flow
*flow
, enum nx_hash_fields fields
,
1857 case NX_HASH_FIELDS_ETH_SRC
:
1858 return jhash_bytes(&flow
->dl_src
, sizeof flow
->dl_src
, basis
);
1860 case NX_HASH_FIELDS_SYMMETRIC_L4
:
1861 return flow_hash_symmetric_l4(flow
, basis
);
1863 case NX_HASH_FIELDS_SYMMETRIC_L3L4
:
1864 return flow_hash_symmetric_l3l4(flow
, basis
, false);
1866 case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP
:
1867 return flow_hash_symmetric_l3l4(flow
, basis
, true);
1874 /* Returns a string representation of 'fields'. */
1876 flow_hash_fields_to_str(enum nx_hash_fields fields
)
1879 case NX_HASH_FIELDS_ETH_SRC
: return "eth_src";
1880 case NX_HASH_FIELDS_SYMMETRIC_L4
: return "symmetric_l4";
1881 case NX_HASH_FIELDS_SYMMETRIC_L3L4
: return "symmetric_l3l4";
1882 case NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP
: return "symmetric_l3l4+udp";
1883 default: return "<unknown>";
1887 /* Returns true if the value of 'fields' is supported. Otherwise false. */
1889 flow_hash_fields_valid(enum nx_hash_fields fields
)
1891 return fields
== NX_HASH_FIELDS_ETH_SRC
1892 || fields
== NX_HASH_FIELDS_SYMMETRIC_L4
1893 || fields
== NX_HASH_FIELDS_SYMMETRIC_L3L4
1894 || fields
== NX_HASH_FIELDS_SYMMETRIC_L3L4_UDP
;
1897 /* Returns a hash value for the bits of 'flow' that are active based on
1898 * 'wc', given 'basis'. */
1900 flow_hash_in_wildcards(const struct flow
*flow
,
1901 const struct flow_wildcards
*wc
, uint32_t basis
)
1903 const uint64_t *wc_u64
= (const uint64_t *) &wc
->masks
;
1904 const uint64_t *flow_u64
= (const uint64_t *) flow
;
1909 for (i
= 0; i
< FLOW_U64S
; i
++) {
1910 hash
= hash_add64(hash
, flow_u64
[i
] & wc_u64
[i
]);
1912 return hash_finish(hash
, 8 * FLOW_U64S
);
1915 /* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
1916 * OpenFlow 1.0 "dl_vlan" value:
1918 * - If it is in the range 0...4095, 'flow->vlan_tci' is set to match
1919 * that VLAN. Any existing PCP match is unchanged (it becomes 0 if
1920 * 'flow' previously matched packets without a VLAN header).
1922 * - If it is OFP_VLAN_NONE, 'flow->vlan_tci' is set to match a packet
1923 * without a VLAN tag.
1925 * - Other values of 'vid' should not be used. */
1927 flow_set_dl_vlan(struct flow
*flow
, ovs_be16 vid
)
1929 if (vid
== htons(OFP10_VLAN_NONE
)) {
1930 flow
->vlan_tci
= htons(0);
1932 vid
&= htons(VLAN_VID_MASK
);
1933 flow
->vlan_tci
&= ~htons(VLAN_VID_MASK
);
1934 flow
->vlan_tci
|= htons(VLAN_CFI
) | vid
;
1938 /* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
1939 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
1942 flow_set_vlan_vid(struct flow
*flow
, ovs_be16 vid
)
1944 ovs_be16 mask
= htons(VLAN_VID_MASK
| VLAN_CFI
);
1945 flow
->vlan_tci
&= ~mask
;
1946 flow
->vlan_tci
|= vid
& mask
;
1949 /* Sets the VLAN PCP that 'flow' matches to 'pcp', which should be in the
1952 * This function has no effect on the VLAN ID that 'flow' matches.
1954 * After calling this function, 'flow' will not match packets without a VLAN
1957 flow_set_vlan_pcp(struct flow
*flow
, uint8_t pcp
)
1960 flow
->vlan_tci
&= ~htons(VLAN_PCP_MASK
);
1961 flow
->vlan_tci
|= htons((pcp
<< VLAN_PCP_SHIFT
) | VLAN_CFI
);
1964 /* Returns the number of MPLS LSEs present in 'flow'
1966 * Returns 0 if the 'dl_type' of 'flow' is not an MPLS ethernet type.
1967 * Otherwise traverses 'flow''s MPLS label stack stopping at the
1968 * first entry that has the BoS bit set. If no such entry exists then
1969 * the maximum number of LSEs that can be stored in 'flow' is returned.
1972 flow_count_mpls_labels(const struct flow
*flow
, struct flow_wildcards
*wc
)
1974 /* dl_type is always masked. */
1975 if (eth_type_mpls(flow
->dl_type
)) {
1980 for (i
= 0; i
< FLOW_MAX_MPLS_LABELS
; i
++) {
1982 wc
->masks
.mpls_lse
[i
] |= htonl(MPLS_BOS_MASK
);
1984 if (flow
->mpls_lse
[i
] & htonl(MPLS_BOS_MASK
)) {
1987 if (flow
->mpls_lse
[i
]) {
1997 /* Returns the number consecutive of MPLS LSEs, starting at the
1998 * innermost LSE, that are common in 'a' and 'b'.
2000 * 'an' must be flow_count_mpls_labels(a).
2001 * 'bn' must be flow_count_mpls_labels(b).
2004 flow_count_common_mpls_labels(const struct flow
*a
, int an
,
2005 const struct flow
*b
, int bn
,
2006 struct flow_wildcards
*wc
)
2008 int min_n
= MIN(an
, bn
);
2013 int a_last
= an
- 1;
2014 int b_last
= bn
- 1;
2017 for (i
= 0; i
< min_n
; i
++) {
2019 wc
->masks
.mpls_lse
[a_last
- i
] = OVS_BE32_MAX
;
2020 wc
->masks
.mpls_lse
[b_last
- i
] = OVS_BE32_MAX
;
2022 if (a
->mpls_lse
[a_last
- i
] != b
->mpls_lse
[b_last
- i
]) {
2033 /* Adds a new outermost MPLS label to 'flow' and changes 'flow''s Ethernet type
2034 * to 'mpls_eth_type', which must be an MPLS Ethertype.
2036 * If the new label is the first MPLS label in 'flow', it is generated as;
2038 * - label: 2, if 'flow' is IPv6, otherwise 0.
2040 * - TTL: IPv4 or IPv6 TTL, if present and nonzero, otherwise 64.
2042 * - TC: IPv4 or IPv6 TOS, if present, otherwise 0.
2046 * If the new label is the second or later label MPLS label in 'flow', it is
2049 * - label: Copied from outer label.
2051 * - TTL: Copied from outer label.
2053 * - TC: Copied from outer label.
2057 * 'n' must be flow_count_mpls_labels(flow). 'n' must be less than
2058 * FLOW_MAX_MPLS_LABELS (because otherwise flow->mpls_lse[] would overflow).
2061 flow_push_mpls(struct flow
*flow
, int n
, ovs_be16 mpls_eth_type
,
2062 struct flow_wildcards
*wc
)
2064 ovs_assert(eth_type_mpls(mpls_eth_type
));
2065 ovs_assert(n
< FLOW_MAX_MPLS_LABELS
);
2071 memset(&wc
->masks
.mpls_lse
, 0xff, sizeof *wc
->masks
.mpls_lse
* n
);
2073 for (i
= n
; i
>= 1; i
--) {
2074 flow
->mpls_lse
[i
] = flow
->mpls_lse
[i
- 1];
2076 flow
->mpls_lse
[0] = (flow
->mpls_lse
[1] & htonl(~MPLS_BOS_MASK
));
2078 int label
= 0; /* IPv4 Explicit Null. */
2082 if (flow
->dl_type
== htons(ETH_TYPE_IPV6
)) {
2086 if (is_ip_any(flow
)) {
2087 tc
= (flow
->nw_tos
& IP_DSCP_MASK
) >> 2;
2089 wc
->masks
.nw_tos
|= IP_DSCP_MASK
;
2090 wc
->masks
.nw_ttl
= 0xff;
2098 flow
->mpls_lse
[0] = set_mpls_lse_values(ttl
, tc
, 1, htonl(label
));
2100 /* Clear all L3 and L4 fields and dp_hash. */
2101 BUILD_ASSERT(FLOW_WC_SEQ
== 35);
2102 memset((char *) flow
+ FLOW_SEGMENT_2_ENDS_AT
, 0,
2103 sizeof(struct flow
) - FLOW_SEGMENT_2_ENDS_AT
);
2106 flow
->dl_type
= mpls_eth_type
;

/* Tries to remove the outermost MPLS label from 'flow'.  Returns true if
 * successful, false otherwise.  On success, sets 'flow''s Ethernet type to
 * 'eth_type'.
 *
 * 'n' must be flow_count_mpls_labels(flow). */
bool
flow_pop_mpls(struct flow *flow, int n, ovs_be16 eth_type,
              struct flow_wildcards *wc)
{
    int i;

    if (n == 0) {
        /* Nothing to pop. */
        return false;
    } else if (n == FLOW_MAX_MPLS_LABELS) {
        if (wc) {
            wc->masks.mpls_lse[n - 1] |= htonl(MPLS_BOS_MASK);
        }
        if (!(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
            /* Can't pop because we don't know what to fill in
             * mpls_lse[n - 1]. */
            return false;
        }
    }

    if (wc) {
        memset(&wc->masks.mpls_lse[1], 0xff,
               sizeof *wc->masks.mpls_lse * (n - 1));
    }
    for (i = 1; i < n; i++) {
        flow->mpls_lse[i - 1] = flow->mpls_lse[i];
    }
    flow->mpls_lse[n - 1] = 0;
    flow->dl_type = eth_type;
    return true;
}
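
/* Illustrative sketch (example_push_one_mpls_label() is a hypothetical
 * helper): both flow_push_mpls() and flow_pop_mpls() take the current label
 * count, so callers typically obtain it from flow_count_mpls_labels() first,
 * e.g.: */
static inline void
example_push_one_mpls_label(struct flow *flow, struct flow_wildcards *wc)
{
    int n = flow_count_mpls_labels(flow, wc);

    if (n < FLOW_MAX_MPLS_LABELS) {
        flow_push_mpls(flow, n, htons(ETH_TYPE_MPLS), wc);
    }
}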

/* Sets the MPLS Label that 'flow' matches to 'label', which is interpreted
 * as an OpenFlow 1.1 "mpls_label" value. */
void
flow_set_mpls_label(struct flow *flow, int idx, ovs_be32 label)
{
    set_mpls_lse_label(&flow->mpls_lse[idx], label);
}

/* Sets the MPLS TTL that 'flow' matches to 'ttl', which should be in the
 * range 0...255. */
void
flow_set_mpls_ttl(struct flow *flow, int idx, uint8_t ttl)
{
    set_mpls_lse_ttl(&flow->mpls_lse[idx], ttl);
}

/* Sets the MPLS TC that 'flow' matches to 'tc', which should be in the
 * range 0...7. */
void
flow_set_mpls_tc(struct flow *flow, int idx, uint8_t tc)
{
    set_mpls_lse_tc(&flow->mpls_lse[idx], tc);
}

/* Sets the MPLS BOS bit that 'flow' matches to 'bos', which should be
 * 0 or 1. */
void
flow_set_mpls_bos(struct flow *flow, int idx, uint8_t bos)
{
    set_mpls_lse_bos(&flow->mpls_lse[idx], bos);
}

/* Sets the entire MPLS LSE. */
void
flow_set_mpls_lse(struct flow *flow, int idx, ovs_be32 lse)
{
    flow->mpls_lse[idx] = lse;
}
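
/* Illustrative sketch (example_set_outer_lse() is a hypothetical helper):
 * composing the outermost LSE (index 0) field by field with the setters
 * above; 'label' is given in host byte order here. */
static inline void
example_set_outer_lse(struct flow *flow, uint32_t label, uint8_t ttl,
                      uint8_t tc, bool bos)
{
    flow_set_mpls_label(flow, 0, htonl(label));
    flow_set_mpls_ttl(flow, 0, ttl);
    flow_set_mpls_tc(flow, 0, tc);
    flow_set_mpls_bos(flow, 0, bos ? 1 : 0);
}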

static size_t
flow_compose_l4(struct dp_packet *p, const struct flow *flow)
{
    size_t l4_len = 0;

    if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
        || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
        if (flow->nw_proto == IPPROTO_TCP) {
            struct tcp_header *tcp;

            l4_len = sizeof *tcp;
            tcp = dp_packet_put_zeros(p, l4_len);
            tcp->tcp_src = flow->tp_src;
            tcp->tcp_dst = flow->tp_dst;
            tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
        } else if (flow->nw_proto == IPPROTO_UDP) {
            struct udp_header *udp;

            l4_len = sizeof *udp;
            udp = dp_packet_put_zeros(p, l4_len);
            udp->udp_src = flow->tp_src;
            udp->udp_dst = flow->tp_dst;
        } else if (flow->nw_proto == IPPROTO_SCTP) {
            struct sctp_header *sctp;

            l4_len = sizeof *sctp;
            sctp = dp_packet_put_zeros(p, l4_len);
            sctp->sctp_src = flow->tp_src;
            sctp->sctp_dst = flow->tp_dst;
        } else if (flow->nw_proto == IPPROTO_ICMP) {
            struct icmp_header *icmp;

            l4_len = sizeof *icmp;
            icmp = dp_packet_put_zeros(p, l4_len);
            icmp->icmp_type = ntohs(flow->tp_src);
            icmp->icmp_code = ntohs(flow->tp_dst);
            icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
        } else if (flow->nw_proto == IPPROTO_IGMP) {
            struct igmp_header *igmp;

            l4_len = sizeof *igmp;
            igmp = dp_packet_put_zeros(p, l4_len);
            igmp->igmp_type = ntohs(flow->tp_src);
            igmp->igmp_code = ntohs(flow->tp_dst);
            put_16aligned_be32(&igmp->group, flow->igmp_group_ip4);
            igmp->igmp_csum = csum(igmp, IGMP_HEADER_LEN);
        } else if (flow->nw_proto == IPPROTO_ICMPV6) {
            struct icmp6_hdr *icmp;

            l4_len = sizeof *icmp;
            icmp = dp_packet_put_zeros(p, l4_len);
            icmp->icmp6_type = ntohs(flow->tp_src);
            icmp->icmp6_code = ntohs(flow->tp_dst);

            if (icmp->icmp6_code == 0 &&
                (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
                 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
                struct in6_addr *nd_target;
                struct ovs_nd_opt *nd_opt;

                l4_len += sizeof *nd_target;
                nd_target = dp_packet_put_zeros(p, sizeof *nd_target);
                *nd_target = flow->nd_target;

                if (!eth_addr_is_zero(flow->arp_sha)) {
                    l4_len += 8;
                    nd_opt = dp_packet_put_zeros(p, 8);
                    nd_opt->nd_opt_len = 1;
                    nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
                    nd_opt->nd_opt_mac = flow->arp_sha;
                }
                if (!eth_addr_is_zero(flow->arp_tha)) {
                    l4_len += 8;
                    nd_opt = dp_packet_put_zeros(p, 8);
                    nd_opt->nd_opt_len = 1;
                    nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
                    nd_opt->nd_opt_mac = flow->arp_tha;
                }
            }
            icmp->icmp6_cksum = (OVS_FORCE uint16_t)
                csum(icmp, (char *)dp_packet_tail(p) - (char *)icmp);
        }
    }
    return l4_len;
}

/* Puts into 'p' a packet that flow_extract() would parse as having the given
 * 'flow'.
 *
 * (This is useful only for testing, obviously, and the packet isn't really
 * valid.  It hasn't got some checksums filled in, for one, and lots of fields
 * are just zeroed.) */
void
flow_compose(struct dp_packet *p, const struct flow *flow)
{
    size_t l4_len;

    /* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
    eth_compose(p, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
    if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
        struct eth_header *eth = dp_packet_l2(p);
        eth->eth_type = htons(dp_packet_size(p));
        return;
    }

    if (flow->vlan_tci & htons(VLAN_CFI)) {
        eth_push_vlan(p, htons(ETH_TYPE_VLAN), flow->vlan_tci);
    }

    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        struct ip_header *ip;

        ip = dp_packet_put_zeros(p, sizeof *ip);
        ip->ip_ihl_ver = IP_IHL_VER(5, 4);
        ip->ip_tos = flow->nw_tos;
        ip->ip_ttl = flow->nw_ttl;
        ip->ip_proto = flow->nw_proto;
        put_16aligned_be32(&ip->ip_src, flow->nw_src);
        put_16aligned_be32(&ip->ip_dst, flow->nw_dst);

        if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
            ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
            if (flow->nw_frag & FLOW_NW_FRAG_LATER) {
                ip->ip_frag_off |= htons(100);
            }
        }

        dp_packet_set_l4(p, dp_packet_tail(p));

        l4_len = flow_compose_l4(p, flow);

        ip = dp_packet_l3(p);
        ip->ip_tot_len = htons(p->l4_ofs - p->l3_ofs + l4_len);
        ip->ip_csum = csum(ip, sizeof *ip);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        struct ovs_16aligned_ip6_hdr *nh;

        nh = dp_packet_put_zeros(p, sizeof *nh);
        put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
                           htonl(flow->nw_tos << 20) | flow->ipv6_label);
        nh->ip6_hlim = flow->nw_ttl;
        nh->ip6_nxt = flow->nw_proto;

        memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
        memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));

        dp_packet_set_l4(p, dp_packet_tail(p));

        l4_len = flow_compose_l4(p, flow);

        nh = dp_packet_l3(p);
        nh->ip6_plen = htons(l4_len);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        struct arp_eth_header *arp;

        arp = dp_packet_put_zeros(p, sizeof *arp);
        dp_packet_set_l3(p, arp);
        arp->ar_hrd = htons(1);
        arp->ar_pro = htons(ETH_TYPE_IP);
        arp->ar_hln = ETH_ADDR_LEN;
        arp->ar_pln = 4;
        arp->ar_op = htons(flow->nw_proto);

        if (flow->nw_proto == ARP_OP_REQUEST ||
            flow->nw_proto == ARP_OP_REPLY) {
            put_16aligned_be32(&arp->ar_spa, flow->nw_src);
            put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
            arp->ar_sha = flow->arp_sha;
            arp->ar_tha = flow->arp_tha;
        }
    }

    if (eth_type_mpls(flow->dl_type)) {
        int n;

        p->l2_5_ofs = p->l3_ofs;
        for (n = 1; n < FLOW_MAX_MPLS_LABELS; n++) {
            if (flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK)) {
                break;
            }
        }
        while (n > 0) {
            push_mpls(p, flow->dl_type, flow->mpls_lse[--n]);
        }
    }
}
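
/* Illustrative sketch (example_compose_test_packet() and 'cb' are
 * hypothetical, shown only to indicate intended use): flow_compose() is meant
 * for testing, so a typical caller builds a throwaway packet, inspects it,
 * and frees the buffer again. */
static inline void
example_compose_test_packet(const struct flow *flow,
                            void (*cb)(struct dp_packet *, void *aux),
                            void *aux)
{
    struct dp_packet p;

    dp_packet_init(&p, 0);
    flow_compose(&p, flow);     /* Fill in headers that match 'flow'. */
    cb(&p, aux);                /* Hand the composed packet to the caller. */
    dp_packet_uninit(&p);
}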

/* Compressed flow. */

/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
 * the caller.  The caller must have already computed 'dst->map' properly to
 * indicate the significant uint64_t elements of 'src'.
 *
 * Normally the significant elements are the ones that are non-zero.  However,
 * when a miniflow is initialized from a (mini)mask, the values can be zeroes,
 * so that the flow and mask always have the same maps. */
void
miniflow_init(struct miniflow *dst, const struct flow *src)
{
    uint64_t *dst_u64 = miniflow_values(dst);
    size_t idx;

    FLOWMAP_FOR_EACH_INDEX(idx, dst->map) {
        *dst_u64++ = flow_u64_value(src, idx);
    }
}

/* Initialize the maps of 'flow' from 'src'. */
void
miniflow_map_init(struct miniflow *flow, const struct flow *src)
{
    /* Initialize map, counting the number of nonzero elements. */
    flowmap_init(&flow->map);
    for (size_t i = 0; i < FLOW_U64S; i++) {
        if (flow_u64_value(src, i)) {
            flowmap_set(&flow->map, i, 1);
        }
    }
}

/* Allocates 'n' miniflows, consecutive in memory, initializing the map
 * of each from 'src'.
 * Returns the size of the miniflow data. */
size_t
miniflow_alloc(struct miniflow *dsts[], size_t n, const struct miniflow *src)
{
    size_t n_values = miniflow_n_values(src);
    size_t data_size = MINIFLOW_VALUES_SIZE(n_values);
    struct miniflow *dst = xmalloc(n * (sizeof *src + data_size));
    size_t i;

    COVERAGE_INC(miniflow_malloc);

    for (i = 0; i < n; i++) {
        *dst = *src;   /* Copy maps. */
        dsts[i] = dst;
        dst += 1;      /* Just past the maps. */
        dst = (struct miniflow *)((uint64_t *)dst + n_values); /* Skip data. */
    }
    return data_size;
}
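
/* Illustrative sketch (example_alloc_two_miniflows() is a hypothetical
 * helper): miniflow_alloc() carves several miniflows with identical maps out
 * of a single allocation, so only the first pointer is passed to free(). */
static inline void
example_alloc_two_miniflows(const struct flow *src)
{
    struct miniflow tmp;
    struct miniflow *dsts[2];

    miniflow_map_init(&tmp, src);       /* Compute the map once. */
    miniflow_alloc(dsts, 2, &tmp);      /* One xmalloc() for both copies. */
    miniflow_init(dsts[0], src);
    miniflow_init(dsts[1], src);

    free(dsts[0]);                      /* Frees both miniflows. */
}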

/* Returns a miniflow copy of 'src'.  The caller must eventually free() the
 * returned miniflow. */
struct miniflow *
miniflow_create(const struct flow *src)
{
    struct miniflow tmp;
    struct miniflow *dst;

    miniflow_map_init(&tmp, src);

    miniflow_alloc(&dst, 1, &tmp);
    miniflow_init(dst, src);
    return dst;
}
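
/* Illustrative sketch (example_miniflow_round_trip() is a hypothetical
 * helper): a miniflow from miniflow_create() lives in a single heap chunk,
 * is released with plain free(), and miniflow_expand() reproduces the
 * original flow exactly. */
static inline void
example_miniflow_round_trip(const struct flow *src)
{
    struct miniflow *mf = miniflow_create(src);
    struct flow expanded;

    miniflow_expand(mf, &expanded);
    ovs_assert(flow_equal(src, &expanded));
    free(mf);
}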

/* Initializes 'dst' as a copy of 'src'.  The caller must have allocated
 * 'dst' to have inline space for 'n_values' data in 'src'. */
void
miniflow_clone(struct miniflow *dst, const struct miniflow *src,
               size_t n_values)
{
    *dst = *src;   /* Copy maps. */
    memcpy(miniflow_values(dst), miniflow_get_values(src),
           MINIFLOW_VALUES_SIZE(n_values));
}

/* Initializes 'dst' as a copy of 'src'. */
void
miniflow_expand(const struct miniflow *src, struct flow *dst)
{
    memset(dst, 0, sizeof *dst);
    flow_union_with_miniflow(dst, src);
}

/* Returns true if 'a' and 'b' are equal miniflows, false otherwise. */
bool
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
{
    const uint64_t *ap = miniflow_get_values(a);
    const uint64_t *bp = miniflow_get_values(b);

    /* This is mostly called after a matching hash, so it is highly likely that
     * the maps are equal as well. */
    if (OVS_LIKELY(flowmap_equal(a->map, b->map))) {
        return !memcmp(ap, bp, miniflow_n_values(a) * sizeof *ap);
    } else {
        size_t idx;

        FLOWMAP_FOR_EACH_INDEX (idx, flowmap_or(a->map, b->map)) {
            if ((flowmap_is_set(&a->map, idx) ? *ap++ : 0)
                != (flowmap_is_set(&b->map, idx) ? *bp++ : 0)) {
                return false;
            }
        }
    }

    return true;
}

/* Returns false if 'a' and 'b' differ at the places where there are 1-bits
 * in 'mask', true otherwise. */
bool
miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
                           const struct minimask *mask)
{
    const uint64_t *p = miniflow_get_values(&mask->masks);
    size_t idx;

    FLOWMAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        if ((miniflow_get(a, idx) ^ miniflow_get(b, idx)) & *p++) {
            return false;
        }
    }

    return true;
}

/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
 * in 'mask', false if they differ. */
bool
miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
                                const struct minimask *mask)
{
    const uint64_t *p = miniflow_get_values(&mask->masks);
    size_t idx;

    FLOWMAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        if ((miniflow_get(a, idx) ^ flow_u64_value(b, idx)) & *p++) {
            return false;
        }
    }

    return true;
}
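
/* Illustrative sketch (example_masked_match() is a hypothetical helper): the
 * typical classifier-style use is to compare a stored rule, kept as a
 * miniflow, against a packet's expanded flow under the rule's mask. */
static inline bool
example_masked_match(const struct flow *rule_flow,
                     const struct flow_wildcards *wc,
                     const struct flow *target)
{
    struct miniflow *rule = miniflow_create(rule_flow);
    struct minimask *mask = minimask_create(wc);
    bool match = miniflow_equal_flow_in_minimask(rule, target, mask);

    free(rule);
    free(mask);
    return match;
}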

/* Initializes 'mask' as a copy of 'wc'. */
void
minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
{
    miniflow_init(&mask->masks, &wc->masks);
}

/* Returns a minimask copy of 'wc'.  The caller must eventually free the
 * returned minimask with free(). */
struct minimask *
minimask_create(const struct flow_wildcards *wc)
{
    return (struct minimask *)miniflow_create(&wc->masks);
}

/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
 *
 * The caller must provide room for FLOW_U64S "uint64_t"s in 'storage', which
 * must follow '*dst_' in memory, for use by 'dst_'.  The caller must *not*
 * free 'dst_' with free(). */
void
minimask_combine(struct minimask *dst_,
                 const struct minimask *a_, const struct minimask *b_,
                 uint64_t storage[FLOW_U64S])
{
    struct miniflow *dst = &dst_->masks;
    uint64_t *dst_values = storage;
    const struct miniflow *a = &a_->masks;
    const struct miniflow *b = &b_->masks;
    size_t idx;

    flowmap_init(&dst->map);

    FLOWMAP_FOR_EACH_INDEX(idx, flowmap_and(a->map, b->map)) {
        /* Both 'a' and 'b' have non-zero data at 'idx'. */
        uint64_t mask = *miniflow_get__(a, idx) & *miniflow_get__(b, idx);

        if (mask) {
            flowmap_set(&dst->map, idx, 1);
            *dst_values++ = mask;
        }
    }
}
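
/* Illustrative usage sketch (example_masks_overlap() and the local struct are
 * hypothetical): callers satisfy the storage requirement by placing the
 * uint64_t array immediately after the minimask, for example on the stack. */
static inline bool
example_masks_overlap(const struct minimask *a, const struct minimask *b)
{
    struct {
        struct minimask mask;
        uint64_t storage[FLOW_U64S];
    } m;

    minimask_combine(&m.mask, a, b, m.storage);

    /* The combined mask is empty when 'a' and 'b' share no masked bits. */
    return miniflow_n_values(&m.mask.masks) > 0;
}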

/* Initializes 'wc' as a copy of 'mask'. */
void
minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
{
    miniflow_expand(&mask->masks, &wc->masks);
}

/* Returns true if 'a' and 'b' are the same flow mask, false otherwise.
 * Minimasks may not have zero data values, so for the minimasks to be the
 * same, they need to have the same map and the same data values. */
bool
minimask_equal(const struct minimask *a, const struct minimask *b)
{
    return !memcmp(a, b, sizeof *a
                   + MINIFLOW_VALUES_SIZE(miniflow_n_values(&a->masks)));
}

/* Returns true if at least one bit matched by 'b' is wildcarded by 'a',
 * false otherwise. */
bool
minimask_has_extra(const struct minimask *a, const struct minimask *b)
{
    const uint64_t *bp = miniflow_get_values(&b->masks);
    size_t idx;

    FLOWMAP_FOR_EACH_INDEX(idx, b->masks.map) {
        uint64_t b_u64 = *bp++;

        /* 'b_u64' is non-zero, check if the data in 'a' is either zero
         * or misses some of the bits in 'b_u64'. */
        if (!MINIFLOW_IN_MAP(&a->masks, idx)
            || ((*miniflow_get__(&a->masks, idx) & b_u64) != b_u64)) {
            return true; /* 'a' wildcards some bits 'b' doesn't. */