/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FLOW_H
#define FLOW_H 1

#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "bitmap.h"
#include "byte-order.h"
#include "openvswitch/compiler.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "openvswitch/flow.h"
#include "packets.h"
#include "hash.h"
#include "util.h"

struct dpif_flow_stats;
struct dpif_flow_attrs;
struct ds;
struct flow_wildcards;
struct minimask;
struct dp_packet;
struct ofputil_port_map;
struct pkt_metadata;
struct match;

/* Some flow fields are mutually exclusive or only appear within the flow
 * pipeline.  IPv6 headers are bigger than IPv4 and MPLS, and IPv6 ND packets
 * are bigger than TCP, UDP, and IGMP packets. */
#define FLOW_MAX_PACKET_U64S (FLOW_U64S                                   \
    /* Unused in datapath */ - FLOW_U64_SIZE(regs)                        \
                             - FLOW_U64_SIZE(metadata)                    \
    /* L2.5/3 */             - FLOW_U64_SIZE(nw_src)  /* incl. nw_dst */  \
                             - FLOW_U64_SIZE(mpls_lse)                    \
    /* L4 */                 - FLOW_U64_SIZE(tp_src)                      \
    )

extern const uint8_t flow_segment_u64s[];

/* Configured maximum VLAN headers. */
extern int flow_vlan_limit;

#define FLOW_U64_OFFSET(FIELD)                          \
    (offsetof(struct flow, FIELD) / sizeof(uint64_t))
#define FLOW_U64_OFFREM(FIELD)                          \
    (offsetof(struct flow, FIELD) % sizeof(uint64_t))

/* Number of 64-bit units spanned by a 'FIELD'. */
#define FLOW_U64_SIZE(FIELD)                                            \
    DIV_ROUND_UP(FLOW_U64_OFFREM(FIELD) + MEMBER_SIZEOF(struct flow, FIELD), \
                 sizeof(uint64_t))

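/* Illustrative sketch, not part of the upstream header: FLOW_U64_SIZE()
 * counts every 64-bit unit that a field touches, so the macros above can
 * size per-field bitmaps.  Here 'metadata' is assumed to be a single
 * aligned u64 while 'regs' spans several units. */
static inline size_t
flow_field_units_example(void)
{
    return FLOW_U64_SIZE(metadata) + FLOW_U64_SIZE(regs);
}
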
void flow_extract(struct dp_packet *, struct flow *);

void flow_zero_wildcards(struct flow *, const struct flow_wildcards *);
void flow_unwildcard_tp_ports(const struct flow *, struct flow_wildcards *);
void flow_get_metadata(const struct flow *, struct match *flow_metadata);
struct netdev *flow_get_tunnel_netdev(struct flow_tnl *tunnel);

const char *ct_state_to_string(uint32_t state);
uint32_t ct_state_from_string(const char *);
bool parse_ct_state(const char *state_str, uint32_t default_state,
                    uint32_t *ct_state, struct ds *);
bool validate_ct_state(uint32_t state, struct ds *);
void flow_clear_conntrack(struct flow *);

char *flow_to_string(const struct flow *, const struct ofputil_port_map *);
void format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
                  uint32_t flags, char del);
void format_flags_masked(struct ds *ds, const char *name,
                         const char *(*bit_to_string)(uint32_t),
                         uint32_t flags, uint32_t mask, uint32_t max_mask);
void format_packet_type_masked(struct ds *, ovs_be32 value, ovs_be32 mask);
int parse_flags(const char *s, const char *(*bit_to_string)(uint32_t),
                char end, const char *field_name, char **res_string,
                uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask);

void flow_format(struct ds *, const struct flow *,
                 const struct ofputil_port_map *);
void flow_print(FILE *, const struct flow *, const struct ofputil_port_map *);
static inline int flow_compare_3way(const struct flow *, const struct flow *);
static inline bool flow_equal(const struct flow *, const struct flow *);
static inline size_t flow_hash(const struct flow *, uint32_t basis);

void flow_set_dl_vlan(struct flow *, ovs_be16 vid, int id);
void flow_fix_vlan_tpid(struct flow *);
void flow_set_vlan_vid(struct flow *, ovs_be16 vid);
void flow_set_vlan_pcp(struct flow *, uint8_t pcp, int id);

void flow_limit_vlans(int vlan_limit);
int flow_count_vlan_headers(const struct flow *);
void flow_skip_common_vlan_headers(const struct flow *a, int *p_an,
                                   const struct flow *b, int *p_bn);
void flow_pop_vlan(struct flow *, struct flow_wildcards *);
void flow_push_vlan_uninit(struct flow *, struct flow_wildcards *);

int flow_count_mpls_labels(const struct flow *, struct flow_wildcards *);
int flow_count_common_mpls_labels(const struct flow *a, int an,
                                  const struct flow *b, int bn,
                                  struct flow_wildcards *wc);
void flow_push_mpls(struct flow *, int n, ovs_be16 mpls_eth_type,
                    struct flow_wildcards *, bool clear_flow_L3);
bool flow_pop_mpls(struct flow *, int n, ovs_be16 eth_type,
                   struct flow_wildcards *);
void flow_set_mpls_label(struct flow *, int idx, ovs_be32 label);
void flow_set_mpls_ttl(struct flow *, int idx, uint8_t ttl);
void flow_set_mpls_tc(struct flow *, int idx, uint8_t tc);
void flow_set_mpls_bos(struct flow *, int idx, uint8_t stack);
void flow_set_mpls_lse(struct flow *, int idx, ovs_be32 lse);

void flow_compose(struct dp_packet *, const struct flow *,
                  const void *l7, size_t l7_len);
void packet_expand(struct dp_packet *, const struct flow *, size_t size);

bool parse_ipv6_ext_hdrs(const void **datap, size_t *sizep, uint8_t *nw_proto,
                         uint8_t *nw_frag,
                         const struct ovs_16aligned_ip6_frag **frag_hdr);
bool parse_nsh(const void **datap, size_t *sizep, struct ovs_key_nsh *key);
uint16_t parse_tcp_flags(struct dp_packet *packet);

static inline uint64_t
flow_get_xreg(const struct flow *flow, int idx)
{
    return ((uint64_t) flow->regs[idx * 2] << 32) | flow->regs[idx * 2 + 1];
}

static inline void
flow_set_xreg(struct flow *flow, int idx, uint64_t value)
{
    flow->regs[idx * 2] = value >> 32;
    flow->regs[idx * 2 + 1] = value;
}

static inline ovs_u128
flow_get_xxreg(const struct flow *flow, int idx)
{
    ovs_u128 value;

    value.u64.hi = (uint64_t) flow->regs[idx * 4] << 32;
    value.u64.hi |= flow->regs[idx * 4 + 1];
    value.u64.lo = (uint64_t) flow->regs[idx * 4 + 2] << 32;
    value.u64.lo |= flow->regs[idx * 4 + 3];

    return value;
}

static inline void
flow_set_xxreg(struct flow *flow, int idx, ovs_u128 value)
{
    flow->regs[idx * 4] = value.u64.hi >> 32;
    flow->regs[idx * 4 + 1] = value.u64.hi;
    flow->regs[idx * 4 + 2] = value.u64.lo >> 32;
    flow->regs[idx * 4 + 3] = value.u64.lo;
}

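/* Illustrative sketch, not part of the upstream header: the xreg/xxreg
 * accessors above pack pairs (or quads) of 32-bit registers into wider
 * values, so a store followed by a load round-trips the value. */
static inline bool
flow_xreg_roundtrip_example(struct flow *flow)
{
    flow_set_xreg(flow, 0, UINT64_C(0x0123456789abcdef));
    return flow_get_xreg(flow, 0) == UINT64_C(0x0123456789abcdef);
}
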
static inline int
flow_compare_3way(const struct flow *a, const struct flow *b)
{
    return memcmp(a, b, sizeof *a);
}

static inline bool
flow_equal(const struct flow *a, const struct flow *b)
{
    return !flow_compare_3way(a, b);
}

static inline size_t
flow_hash(const struct flow *flow, uint32_t basis)
{
    return hash_bytes64((const uint64_t *)flow, sizeof *flow, basis);
}

static inline uint16_t
ofp_to_u16(ofp_port_t ofp_port)
{
    return (OVS_FORCE uint16_t) ofp_port;
}

static inline uint32_t
odp_to_u32(odp_port_t odp_port)
{
    return (OVS_FORCE uint32_t) odp_port;
}

static inline uint32_t
ofp11_to_u32(ofp11_port_t ofp11_port)
{
    return (OVS_FORCE uint32_t) ofp11_port;
}

static inline ofp_port_t
u16_to_ofp(uint16_t port)
{
    return OFP_PORT_C(port);
}

static inline odp_port_t
u32_to_odp(uint32_t port)
{
    return ODP_PORT_C(port);
}

static inline ofp11_port_t
u32_to_ofp11(uint32_t port)
{
    return OFP11_PORT_C(port);
}

static inline uint32_t
hash_ofp_port(ofp_port_t ofp_port)
{
    return hash_int(ofp_to_u16(ofp_port), 0);
}

static inline uint32_t
hash_odp_port(odp_port_t odp_port)
{
    return hash_int(odp_to_u32(odp_port), 0);
}
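
/* Illustrative sketch, not part of the upstream header: the typed port
 * wrappers above are plain casts, so converting to the raw integer and
 * back is lossless. */
static inline bool
ofp_port_roundtrip_example(uint16_t port)
{
    return ofp_to_u16(u16_to_ofp(port)) == port;
}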
\f
uint32_t flow_hash_5tuple(const struct flow *flow, uint32_t basis);
uint32_t flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis);
uint32_t flow_hash_symmetric_l2(const struct flow *flow, uint32_t basis);
uint32_t flow_hash_symmetric_l3l4(const struct flow *flow, uint32_t basis,
                                  bool inc_udp_ports);
uint32_t flow_hash_symmetric_l3(const struct flow *flow, uint32_t basis);

/* Initialize a flow with random fields that matter for nx_hash_fields. */
void flow_random_hash_fields(struct flow *);
void flow_mask_hash_fields(const struct flow *, struct flow_wildcards *,
                           enum nx_hash_fields);
uint32_t flow_hash_fields(const struct flow *, enum nx_hash_fields,
                          uint16_t basis);
const char *flow_hash_fields_to_str(enum nx_hash_fields);
bool flow_hash_fields_valid(enum nx_hash_fields);

uint32_t flow_hash_in_wildcards(const struct flow *,
                                const struct flow_wildcards *,
                                uint32_t basis);

bool flow_equal_except(const struct flow *a, const struct flow *b,
                       const struct flow_wildcards *);
\f
/* Bitmap for flow values.  For each 1-bit the corresponding flow value is
 * explicitly specified, other values are zeroes.
 *
 * map_t must be wide enough to hold any member of struct flow. */
typedef unsigned long long map_t;
#define MAP_T_BITS (sizeof(map_t) * CHAR_BIT)
#define MAP_1 (map_t)1
#define MAP_MAX TYPE_MAXIMUM(map_t)

#define MAP_IS_SET(MAP, IDX) ((MAP) & (MAP_1 << (IDX)))

/* Iterate through the indices of all 1-bits in 'MAP'. */
#define MAP_FOR_EACH_INDEX(IDX, MAP)            \
    ULLONG_FOR_EACH_1(IDX, MAP)

#define FLOWMAP_UNITS DIV_ROUND_UP(FLOW_U64S, MAP_T_BITS)

struct flowmap {
    map_t bits[FLOWMAP_UNITS];
};

#define FLOWMAP_EMPTY_INITIALIZER { { 0 } }

static inline void flowmap_init(struct flowmap *);
static inline bool flowmap_equal(struct flowmap, struct flowmap);
static inline bool flowmap_is_set(const struct flowmap *, size_t idx);
static inline bool flowmap_are_set(const struct flowmap *, size_t idx,
                                   unsigned int n_bits);
static inline void flowmap_set(struct flowmap *, size_t idx,
                               unsigned int n_bits);
static inline void flowmap_clear(struct flowmap *, size_t idx,
                                 unsigned int n_bits);
static inline struct flowmap flowmap_or(struct flowmap, struct flowmap);
static inline struct flowmap flowmap_and(struct flowmap, struct flowmap);
static inline bool flowmap_is_empty(struct flowmap);
static inline unsigned int flowmap_n_1bits(struct flowmap);

#define FLOWMAP_HAS_FIELD(FM, FIELD) \
    flowmap_are_set(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))

#define FLOWMAP_SET(FM, FIELD) \
    flowmap_set(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))

#define FLOWMAP_SET__(FM, FIELD, SIZE)                  \
    flowmap_set(FM, FLOW_U64_OFFSET(FIELD),             \
                DIV_ROUND_UP(SIZE, sizeof(uint64_t)))

/* XXX: Only works for full 64-bit units. */
#define FLOWMAP_CLEAR(FM, FIELD)                                        \
    BUILD_ASSERT_DECL(FLOW_U64_OFFREM(FIELD) == 0);                     \
    BUILD_ASSERT_DECL(sizeof(((struct flow *)0)->FIELD) % sizeof(uint64_t) == 0); \
    flowmap_clear(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))

/* Iterate through all flowmap unit indices, from 0 up to FLOWMAP_UNITS - 1. */
#define FLOWMAP_FOR_EACH_UNIT(UNIT)                     \
    for ((UNIT) = 0; (UNIT) < FLOWMAP_UNITS; (UNIT)++)

/* Iterate through all map units in 'FLOWMAP'. */
#define FLOWMAP_FOR_EACH_MAP(MAP, FLOWMAP)                              \
    for (size_t unit__ = 0;                                             \
         unit__ < FLOWMAP_UNITS && ((MAP) = (FLOWMAP).bits[unit__], true); \
         unit__++)

struct flowmap_aux;
static inline bool flowmap_next_index(struct flowmap_aux *, size_t *idx);

#define FLOWMAP_AUX_INITIALIZER(FLOWMAP) { .unit = 0, .map = (FLOWMAP) }

/* Iterate through all struct flow u64 indices specified by 'MAP'.  This is a
 * slower but easier version of the FLOWMAP_FOR_EACH_MAP() &
 * MAP_FOR_EACH_INDEX() combination. */
#define FLOWMAP_FOR_EACH_INDEX(IDX, MAP)                            \
    for (struct flowmap_aux aux__ = FLOWMAP_AUX_INITIALIZER(MAP);   \
         flowmap_next_index(&aux__, &(IDX));)

/* Flowmap inline implementations. */
static inline void
flowmap_init(struct flowmap *fm)
{
    memset(fm, 0, sizeof *fm);
}

static inline bool
flowmap_equal(struct flowmap a, struct flowmap b)
{
    return !memcmp(&a, &b, sizeof a);
}

static inline bool
flowmap_is_set(const struct flowmap *fm, size_t idx)
{
    return (fm->bits[idx / MAP_T_BITS] & (MAP_1 << (idx % MAP_T_BITS))) != 0;
}

/* Returns 'true' if any of the 'n_bits' bits starting at 'idx' are set in
 * 'fm'.  'n_bits' can be at most MAP_T_BITS. */
static inline bool
flowmap_are_set(const struct flowmap *fm, size_t idx, unsigned int n_bits)
{
    map_t n_bits_mask = (MAP_1 << n_bits) - 1;
    size_t unit = idx / MAP_T_BITS;

    idx %= MAP_T_BITS;

    if (fm->bits[unit] & (n_bits_mask << idx)) {
        return true;
    }
    /* The seemingly unnecessary bounds check on 'unit' is a workaround for a
     * false-positive array out of bounds error by GCC 4.9. */
    if (unit + 1 < FLOWMAP_UNITS && idx + n_bits > MAP_T_BITS) {
        /* Check the remaining bits from the next unit. */
        return fm->bits[unit + 1] & (n_bits_mask >> (MAP_T_BITS - idx));
    }
    return false;
}

/* Set the 'n_bits' consecutive bits in 'fm', starting at bit 'idx'.
 * 'n_bits' can be at most MAP_T_BITS. */
static inline void
flowmap_set(struct flowmap *fm, size_t idx, unsigned int n_bits)
{
    map_t n_bits_mask = (MAP_1 << n_bits) - 1;
    size_t unit = idx / MAP_T_BITS;

    idx %= MAP_T_BITS;

    fm->bits[unit] |= n_bits_mask << idx;
    /* The seemingly unnecessary bounds check on 'unit' is a workaround for a
     * false-positive array out of bounds error by GCC 4.9. */
    if (unit + 1 < FLOWMAP_UNITS && idx + n_bits > MAP_T_BITS) {
        /* 'MAP_T_BITS - idx' bits were set on 'unit', set the remaining
         * bits from the next unit. */
        fm->bits[unit + 1] |= n_bits_mask >> (MAP_T_BITS - idx);
    }
}

/* Clears the 'n_bits' consecutive bits in 'fm', starting at bit 'idx'.
 * 'n_bits' can be at most MAP_T_BITS. */
static inline void
flowmap_clear(struct flowmap *fm, size_t idx, unsigned int n_bits)
{
    map_t n_bits_mask = (MAP_1 << n_bits) - 1;
    size_t unit = idx / MAP_T_BITS;

    idx %= MAP_T_BITS;

    fm->bits[unit] &= ~(n_bits_mask << idx);
    /* The seemingly unnecessary bounds check on 'unit' is a workaround for a
     * false-positive array out of bounds error by GCC 4.9. */
    if (unit + 1 < FLOWMAP_UNITS && idx + n_bits > MAP_T_BITS) {
        /* 'MAP_T_BITS - idx' bits were cleared on 'unit', clear the
         * remaining bits from the next unit. */
        fm->bits[unit + 1] &= ~(n_bits_mask >> (MAP_T_BITS - idx));
    }
}

/* OR the bits in the flowmaps. */
static inline struct flowmap
flowmap_or(struct flowmap a, struct flowmap b)
{
    struct flowmap map;
    size_t unit;

    FLOWMAP_FOR_EACH_UNIT (unit) {
        map.bits[unit] = a.bits[unit] | b.bits[unit];
    }
    return map;
}

/* AND the bits in the flowmaps. */
static inline struct flowmap
flowmap_and(struct flowmap a, struct flowmap b)
{
    struct flowmap map;
    size_t unit;

    FLOWMAP_FOR_EACH_UNIT (unit) {
        map.bits[unit] = a.bits[unit] & b.bits[unit];
    }
    return map;
}

static inline bool
flowmap_is_empty(struct flowmap fm)
{
    map_t map;

    FLOWMAP_FOR_EACH_MAP (map, fm) {
        if (map) {
            return false;
        }
    }
    return true;
}

static inline unsigned int
flowmap_n_1bits(struct flowmap fm)
{
    unsigned int n_1bits = 0;
    size_t unit;

    FLOWMAP_FOR_EACH_UNIT (unit) {
        n_1bits += count_1bits(fm.bits[unit]);
    }
    return n_1bits;
}

struct flowmap_aux {
    size_t unit;
    struct flowmap map;
};

static inline bool
flowmap_next_index(struct flowmap_aux *aux, size_t *idx)
{
    for (;;) {
        map_t *map = &aux->map.bits[aux->unit];
        if (*map) {
            *idx = aux->unit * MAP_T_BITS + raw_ctz(*map);
            *map = zero_rightmost_1bit(*map);
            return true;
        }
        if (++aux->unit >= FLOWMAP_UNITS) {
            return false;
        }
    }
}

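/* Illustrative usage sketch, not part of the upstream header: mark two
 * fields in a flowmap and walk the resulting u64 indices.  Each visited
 * index corresponds to one 1-bit set by FLOWMAP_SET() above. */
static inline unsigned int
flowmap_example_count_indices(void)
{
    struct flowmap fm;
    size_t idx;
    unsigned int n = 0;

    flowmap_init(&fm);
    FLOWMAP_SET(&fm, metadata);
    FLOWMAP_SET(&fm, regs);

    FLOWMAP_FOR_EACH_INDEX (idx, fm) {
        n++;
    }
    return n;
}
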
\f
/* Compressed flow. */

/* A sparse representation of a "struct flow".
 *
 * A "struct flow" is fairly large and tends to be mostly zeros.  Sparse
 * representation has two advantages.  First, it saves memory and, more
 * importantly, minimizes the number of accessed cache lines.  Second, it
 * saves time when the goal is to iterate over only the nonzero parts of
 * the struct.
 *
 * The 'map' member holds one bit for each uint64_t in a "struct flow".  Each
 * 0-bit indicates that the corresponding uint64_t is zero, each 1-bit that it
 * *may* be nonzero (see below how this applies to minimasks).
 *
 * The values indicated by 'map' always follow the miniflow in memory.  The
 * user of the miniflow is responsible for always having enough storage after
 * the struct miniflow corresponding to the number of 1-bits in maps.
 *
 * Elements in the values array are allowed to be zero.  This is useful for
 * "struct minimatch", for which ensuring that the miniflow and minimask
 * members have the same maps allows optimization.  This allowance applies
 * only to a miniflow that is not a mask.  That is, a minimask may NOT have
 * zero elements in its values.
 *
 * A miniflow is always dynamically allocated so that the maps are followed by
 * at least as many elements as there are 1-bits in maps. */
struct miniflow {
    struct flowmap map;
    /* Followed by:
     *     uint64_t values[n];
     * where 'n' is miniflow_n_values(miniflow). */
};
BUILD_ASSERT_DECL(sizeof(struct miniflow) % sizeof(uint64_t) == 0);

#define MINIFLOW_VALUES_SIZE(COUNT) ((COUNT) * sizeof(uint64_t))

static inline uint64_t *miniflow_values(struct miniflow *mf)
{
    return (uint64_t *)(mf + 1);
}

static inline const uint64_t *miniflow_get_values(const struct miniflow *mf)
{
    return (const uint64_t *)(mf + 1);
}

struct pkt_metadata;

/* The 'dst' must be followed by buffer space for FLOW_U64S 64-bit units.
 * 'dst->map' is ignored on input and set on output to indicate which fields
 * were extracted. */
void miniflow_extract(struct dp_packet *packet, struct miniflow *dst);
void miniflow_map_init(struct miniflow *, const struct flow *);
void flow_wc_map(const struct flow *, struct flowmap *);
size_t miniflow_alloc(struct miniflow *dsts[], size_t n,
                      const struct miniflow *src);
void miniflow_init(struct miniflow *, const struct flow *);
void miniflow_clone(struct miniflow *, const struct miniflow *,
                    size_t n_values);
struct miniflow * miniflow_create(const struct flow *);

void miniflow_expand(const struct miniflow *, struct flow *);

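/* Illustrative round-trip sketch, not part of the upstream header:
 * compress a flow into a heap-allocated miniflow, expand it back, and
 * verify the result.  Assumes miniflow_create() returns memory that can
 * be released with free(), as an xmalloc()-based allocator provides. */
static inline bool
miniflow_roundtrip_example(const struct flow *flow)
{
    struct miniflow *mf = miniflow_create(flow);
    struct flow expanded;
    bool equal;

    miniflow_expand(mf, &expanded);
    equal = flow_equal(flow, &expanded);
    free(mf);
    return equal;
}
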
static inline uint64_t flow_u64_value(const struct flow *flow, size_t index)
{
    return ((uint64_t *)flow)[index];
}

static inline uint64_t *flow_u64_lvalue(struct flow *flow, size_t index)
{
    return &((uint64_t *)flow)[index];
}

static inline size_t
miniflow_n_values(const struct miniflow *flow)
{
    return flowmap_n_1bits(flow->map);
}

struct flow_for_each_in_maps_aux {
    const struct flow *flow;
    struct flowmap_aux map_aux;
};

static inline bool
flow_values_get_next_in_maps(struct flow_for_each_in_maps_aux *aux,
                             uint64_t *value)
{
    size_t idx;

    if (flowmap_next_index(&aux->map_aux, &idx)) {
        *value = flow_u64_value(aux->flow, idx);
        return true;
    }
    return false;
}

/* Iterate through all flow u64 values specified by 'MAPS'. */
#define FLOW_FOR_EACH_IN_MAPS(VALUE, FLOW, MAPS)            \
    for (struct flow_for_each_in_maps_aux aux__             \
             = { (FLOW), FLOWMAP_AUX_INITIALIZER(MAPS) };   \
         flow_values_get_next_in_maps(&aux__, &(VALUE));)

struct mf_for_each_in_map_aux {
    size_t unit;             /* Current 64-bit unit of the flowmaps
                                being processed. */
    struct flowmap fmap;     /* Remaining 1-bits corresponding to the
                                64-bit words in 'values'. */
    struct flowmap map;      /* Remaining 1-bits corresponding to the
                                64-bit words of interest. */
    const uint64_t *values;  /* 64-bit words corresponding to the
                                1-bits in 'fmap'. */
};

/* Get the data from 'aux->values' corresponding to the next lowest 1-bit
 * in 'aux->map', given that 'aux->values' points to an array of 64-bit
 * words corresponding to the 1-bits in 'aux->fmap', starting from the
 * rightmost 1-bit.
 *
 * Returns 'true' if the traversal is incomplete, 'false' otherwise.
 * 'aux' is prepared for the next iteration after each call.
 *
 * This is used to traverse through, for example, the values in a miniflow
 * representation of a flow key selected by non-zero 64-bit words in a
 * corresponding subtable mask. */
static inline bool
mf_get_next_in_map(struct mf_for_each_in_map_aux *aux,
                   uint64_t *value)
{
    map_t *map, *fmap;
    map_t rm1bit;

    /* Skip empty map units. */
    while (OVS_UNLIKELY(!*(map = &aux->map.bits[aux->unit]))) {
        /* Skip remaining data in the current unit before advancing
         * to the next. */
        aux->values += count_1bits(aux->fmap.bits[aux->unit]);
        if (++aux->unit == FLOWMAP_UNITS) {
            return false;
        }
    }

    rm1bit = rightmost_1bit(*map);
    *map -= rm1bit;
    fmap = &aux->fmap.bits[aux->unit];

    /* If the rightmost 1-bit found from the current unit in 'aux->map'
     * ('rm1bit') is also present in 'aux->fmap', store the corresponding
     * value from 'aux->values' to '*value', otherwise store 0. */
    if (OVS_LIKELY(*fmap & rm1bit)) {
        /* Skip all 64-bit words in 'values' preceding the one corresponding
         * to 'rm1bit'. */
        map_t trash = *fmap & (rm1bit - 1);

        /* Avoid resetting 'fmap' and calling count_1bits() when trash is
         * zero. */
        if (trash) {
            *fmap -= trash;
            aux->values += count_1bits(trash);
        }

        *value = *aux->values;
    } else {
        *value = 0;
    }
    return true;
}

/* Iterate through miniflow u64 values specified by 'FLOWMAP'. */
#define MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, FLOW, FLOWMAP)          \
    for (struct mf_for_each_in_map_aux aux__ =                      \
        { 0, (FLOW)->map, (FLOWMAP), miniflow_get_values(FLOW) };   \
         mf_get_next_in_map(&aux__, &(VALUE));)

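/* Illustrative sketch, not part of the upstream header: walk the miniflow
 * values selected by a flowmap (e.g. a subtable mask's map), summing them
 * as a stand-in for real per-field processing.  Selected units that the
 * miniflow does not carry are read as zero. */
static inline uint64_t
miniflow_example_sum_in_map(const struct miniflow *mf, struct flowmap sel)
{
    uint64_t value;
    uint64_t sum = 0;

    MINIFLOW_FOR_EACH_IN_FLOWMAP (value, mf, sel) {
        sum += value;
    }
    return sum;
}
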
/* This can be used when it is known that 'idx' is set in 'map'. */
static inline const uint64_t *
miniflow_values_get__(const uint64_t *values, map_t map, size_t idx)
{
    return values + count_1bits(map & ((MAP_1 << idx) - 1));
}

/* This can be used when it is known that 'idx' is set in
 * the map of 'mf'. */
static inline const uint64_t *
miniflow_get__(const struct miniflow *mf, size_t idx)
{
    const uint64_t *values = miniflow_get_values(mf);
    const map_t *map = mf->map.bits;

    while (idx >= MAP_T_BITS) {
        idx -= MAP_T_BITS;
        values += count_1bits(*map++);
    }
    return miniflow_values_get__(values, *map, idx);
}

#define MINIFLOW_IN_MAP(MF, IDX) flowmap_is_set(&(MF)->map, IDX)

/* Get the value of the struct flow 'FIELD' as up to 8 byte wide integer type
 * 'TYPE' from miniflow 'MF'. */
#define MINIFLOW_GET_TYPE(MF, TYPE, FIELD)                              \
    (BUILD_ASSERT(sizeof(TYPE) == sizeof(((struct flow *)0)->FIELD)),   \
     BUILD_ASSERT_GCCONLY(__builtin_types_compatible_p(TYPE, typeof(((struct flow *)0)->FIELD))), \
     MINIFLOW_GET_TYPE__(MF, TYPE, FIELD))

/* Like MINIFLOW_GET_TYPE, but without checking that TYPE is the correct width
 * for FIELD.  (This is useful for deliberately reading adjacent fields in one
 * go.) */
#define MINIFLOW_GET_TYPE__(MF, TYPE, FIELD)                            \
    (MINIFLOW_IN_MAP(MF, FLOW_U64_OFFSET(FIELD))                        \
     ? ((OVS_FORCE const TYPE *)miniflow_get__(MF, FLOW_U64_OFFSET(FIELD))) \
       [FLOW_U64_OFFREM(FIELD) / sizeof(TYPE)]                          \
     : 0)

#define MINIFLOW_GET_U128(FLOW, FIELD)                                  \
    (ovs_u128) { .u64 = {                                               \
        (MINIFLOW_IN_MAP(FLOW, FLOW_U64_OFFSET(FIELD)) ?                \
         *miniflow_get__(FLOW, FLOW_U64_OFFSET(FIELD)) : 0),            \
        (MINIFLOW_IN_MAP(FLOW, FLOW_U64_OFFSET(FIELD) + 1) ?            \
         *miniflow_get__(FLOW, FLOW_U64_OFFSET(FIELD) + 1) : 0) } }

#define MINIFLOW_GET_U8(FLOW, FIELD)            \
    MINIFLOW_GET_TYPE(FLOW, uint8_t, FIELD)
#define MINIFLOW_GET_U16(FLOW, FIELD)           \
    MINIFLOW_GET_TYPE(FLOW, uint16_t, FIELD)
#define MINIFLOW_GET_BE16(FLOW, FIELD)          \
    MINIFLOW_GET_TYPE(FLOW, ovs_be16, FIELD)
#define MINIFLOW_GET_U32(FLOW, FIELD)           \
    MINIFLOW_GET_TYPE(FLOW, uint32_t, FIELD)
#define MINIFLOW_GET_BE32(FLOW, FIELD)          \
    MINIFLOW_GET_TYPE(FLOW, ovs_be32, FIELD)
#define MINIFLOW_GET_U64(FLOW, FIELD)           \
    MINIFLOW_GET_TYPE(FLOW, uint64_t, FIELD)
#define MINIFLOW_GET_BE64(FLOW, FIELD)          \
    MINIFLOW_GET_TYPE(FLOW, ovs_be64, FIELD)

static inline uint64_t miniflow_get(const struct miniflow *,
                                    unsigned int u64_ofs);
static inline uint32_t miniflow_get_u32(const struct miniflow *,
                                        unsigned int u32_ofs);
static inline ovs_be32 miniflow_get_be32(const struct miniflow *,
                                         unsigned int be32_ofs);
static inline uint16_t miniflow_get_vid(const struct miniflow *, size_t);
static inline uint16_t miniflow_get_tcp_flags(const struct miniflow *);
static inline ovs_be64 miniflow_get_metadata(const struct miniflow *);
static inline uint64_t miniflow_get_tun_metadata_present_map(
    const struct miniflow *);
static inline uint32_t miniflow_get_recirc_id(const struct miniflow *);
static inline uint32_t miniflow_get_dp_hash(const struct miniflow *);
static inline ovs_be32 miniflow_get_ports(const struct miniflow *);

bool miniflow_equal(const struct miniflow *a, const struct miniflow *b);
bool miniflow_equal_in_minimask(const struct miniflow *a,
                                const struct miniflow *b,
                                const struct minimask *);
bool miniflow_equal_flow_in_minimask(const struct miniflow *a,
                                     const struct flow *b,
                                     const struct minimask *);
uint32_t miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis);

\f
/* Compressed flow wildcards. */

/* A sparse representation of a "struct flow_wildcards".
 *
 * See the large comment on struct miniflow for details.
 *
 * Note: While miniflow can have zero data for a 1-bit in the map,
 * a minimask may not!  We rely on this in the implementation. */
struct minimask {
    struct miniflow masks;
};

void minimask_init(struct minimask *, const struct flow_wildcards *);
struct minimask * minimask_create(const struct flow_wildcards *);
void minimask_combine(struct minimask *dst,
                      const struct minimask *a, const struct minimask *b,
                      uint64_t storage[FLOW_U64S]);

void minimask_expand(const struct minimask *, struct flow_wildcards *);

static inline uint32_t minimask_get_u32(const struct minimask *,
                                        unsigned int u32_ofs);
static inline ovs_be32 minimask_get_be32(const struct minimask *,
                                         unsigned int be32_ofs);
static inline uint16_t minimask_get_vid_mask(const struct minimask *, size_t);
static inline ovs_be64 minimask_get_metadata_mask(const struct minimask *);

bool minimask_equal(const struct minimask *a, const struct minimask *b);
bool minimask_has_extra(const struct minimask *, const struct minimask *);

\f
/* Returns true if 'mask' matches every packet, false if 'mask' fixes any bits
 * or fields. */
static inline bool
minimask_is_catchall(const struct minimask *mask)
{
    /* For every 1-bit in mask's map, the corresponding value is non-zero,
     * so the only way the mask can avoid fixing any bits or fields is for
     * its map to be zero. */
    return flowmap_is_empty(mask->masks.map);
}

/* Returns the uint64_t that would be at byte offset '8 * u64_ofs' if 'flow'
 * were expanded into a "struct flow". */
static inline uint64_t miniflow_get(const struct miniflow *flow,
                                    unsigned int u64_ofs)
{
    return MINIFLOW_IN_MAP(flow, u64_ofs) ? *miniflow_get__(flow, u64_ofs) : 0;
}

static inline uint32_t miniflow_get_u32(const struct miniflow *flow,
                                        unsigned int u32_ofs)
{
    uint64_t value = miniflow_get(flow, u32_ofs / 2);

#if WORDS_BIGENDIAN
    return (u32_ofs & 1) ? value : value >> 32;
#else
    return (u32_ofs & 1) ? value >> 32 : value;
#endif
}

static inline ovs_be32 miniflow_get_be32(const struct miniflow *flow,
                                         unsigned int be32_ofs)
{
    return (OVS_FORCE ovs_be32)miniflow_get_u32(flow, be32_ofs);
}

/* Returns the VID within the vlan_tci member of the "struct flow" represented
 * by 'flow'. */
static inline uint16_t
miniflow_get_vid(const struct miniflow *flow, size_t n)
{
    if (n < FLOW_MAX_VLAN_HEADERS) {
        union flow_vlan_hdr hdr = {
            .qtag = MINIFLOW_GET_BE32(flow, vlans[n].qtag)
        };
        return vlan_tci_to_vid(hdr.tci);
    }
    return 0;
}

/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'mask'
 * were expanded into a "struct flow_wildcards". */
static inline uint32_t
minimask_get_u32(const struct minimask *mask, unsigned int u32_ofs)
{
    return miniflow_get_u32(&mask->masks, u32_ofs);
}

static inline ovs_be32
minimask_get_be32(const struct minimask *mask, unsigned int be32_ofs)
{
    return (OVS_FORCE ovs_be32)minimask_get_u32(mask, be32_ofs);
}

/* Returns the VID mask within the vlan_tci member of the "struct
 * flow_wildcards" represented by 'mask'. */
static inline uint16_t
minimask_get_vid_mask(const struct minimask *mask, size_t n)
{
    return miniflow_get_vid(&mask->masks, n);
}

/* Returns the value of the "tcp_flags" field in 'flow'. */
static inline uint16_t
miniflow_get_tcp_flags(const struct miniflow *flow)
{
    return ntohs(MINIFLOW_GET_BE16(flow, tcp_flags));
}

/* Returns the value of the OpenFlow 1.1+ "metadata" field in 'flow'. */
static inline ovs_be64
miniflow_get_metadata(const struct miniflow *flow)
{
    return MINIFLOW_GET_BE64(flow, metadata);
}

/* Returns the bitmap that indicates which tunnel metadata fields are present
 * in 'flow'. */
static inline uint64_t
miniflow_get_tun_metadata_present_map(const struct miniflow *flow)
{
    return MINIFLOW_GET_U64(flow, tunnel.metadata.present.map);
}

/* Returns the recirc_id in 'flow'. */
static inline uint32_t
miniflow_get_recirc_id(const struct miniflow *flow)
{
    return MINIFLOW_GET_U32(flow, recirc_id);
}

/* Returns the dp_hash in 'flow'. */
static inline uint32_t
miniflow_get_dp_hash(const struct miniflow *flow)
{
    return MINIFLOW_GET_U32(flow, dp_hash);
}

/* Returns the 'tp_src' and 'tp_dst' fields together as one piece of data. */
static inline ovs_be32
miniflow_get_ports(const struct miniflow *flow)
{
    return MINIFLOW_GET_TYPE__(flow, ovs_be32, tp_src);
}

/* Returns the mask for the OpenFlow 1.1+ "metadata" field in 'mask'.
 *
 * The return value is all-1-bits if 'mask' matches on the whole value of the
 * metadata field, all-0-bits if 'mask' entirely wildcards the metadata field,
 * or some other value if the metadata field is partially matched, partially
 * wildcarded. */
static inline ovs_be64
minimask_get_metadata_mask(const struct minimask *mask)
{
    return MINIFLOW_GET_BE64(&mask->masks, metadata);
}

/* Perform a bitwise OR of miniflow 'src' flow data specified in 'subset' with
 * the equivalent fields in 'dst', storing the result in 'dst'.  'subset' must
 * be a subset of 'src's map. */
static inline void
flow_union_with_miniflow_subset(struct flow *dst, const struct miniflow *src,
                                struct flowmap subset)
{
    uint64_t *dst_u64 = (uint64_t *) dst;
    const uint64_t *p = miniflow_get_values(src);
    map_t map;

    FLOWMAP_FOR_EACH_MAP (map, subset) {
        size_t idx;

        MAP_FOR_EACH_INDEX(idx, map) {
            dst_u64[idx] |= *p++;
        }
        dst_u64 += MAP_T_BITS;
    }
}

/* Perform a bitwise OR of miniflow 'src' flow data with the equivalent
 * fields in 'dst', storing the result in 'dst'. */
static inline void
flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
{
    flow_union_with_miniflow_subset(dst, src, src->map);
}

static inline bool is_ct_valid(const struct flow *flow,
                               const struct flow_wildcards *mask,
                               struct flow_wildcards *wc)
{
    /* Matches are checked with 'mask' and without 'wc'. */
    if (mask && !wc) {
        /* Must match at least one of the bits that implies a valid
         * conntrack entry, or an explicit not-invalid. */
        return flow->ct_state & (CS_NEW | CS_ESTABLISHED | CS_RELATED
                                 | CS_REPLY_DIR | CS_SRC_NAT | CS_DST_NAT)
               || (flow->ct_state & CS_TRACKED
                   && mask->masks.ct_state & CS_INVALID
                   && !(flow->ct_state & CS_INVALID));
    }
    /* Else we are checking a fully extracted flow, where valid CT state always
     * has either 'new', 'established', or 'reply_dir' bit set. */
#define CS_VALID_MASK (CS_NEW | CS_ESTABLISHED | CS_REPLY_DIR)
    if (wc) {
        wc->masks.ct_state |= CS_VALID_MASK;
    }
    return flow->ct_state & CS_VALID_MASK;
}

static inline void
pkt_metadata_from_flow(struct pkt_metadata *md, const struct flow *flow)
{
    /* Update this function whenever struct flow changes. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 42);

    md->recirc_id = flow->recirc_id;
    md->dp_hash = flow->dp_hash;
    flow_tnl_copy__(&md->tunnel, &flow->tunnel);
    md->skb_priority = flow->skb_priority;
    md->pkt_mark = flow->pkt_mark;
    md->in_port = flow->in_port;
    md->ct_state = flow->ct_state;
    md->ct_zone = flow->ct_zone;
    md->ct_mark = flow->ct_mark;
    md->ct_label = flow->ct_label;

    md->ct_orig_tuple_ipv6 = false;
    if (flow->dl_type && is_ct_valid(flow, NULL, NULL)) {
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            md->ct_orig_tuple.ipv4 = (struct ovs_key_ct_tuple_ipv4) {
                flow->ct_nw_src,
                flow->ct_nw_dst,
                flow->ct_tp_src,
                flow->ct_tp_dst,
                flow->ct_nw_proto,
            };
        } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            md->ct_orig_tuple_ipv6 = true;
            md->ct_orig_tuple.ipv6 = (struct ovs_key_ct_tuple_ipv6) {
                flow->ct_ipv6_src,
                flow->ct_ipv6_dst,
                flow->ct_tp_src,
                flow->ct_tp_dst,
                flow->ct_nw_proto,
            };
        } else {
            /* Reset ct_orig_tuple for other types. */
            memset(&md->ct_orig_tuple, 0, sizeof md->ct_orig_tuple);
        }
    } else {
        memset(&md->ct_orig_tuple, 0, sizeof md->ct_orig_tuple);
    }
}

/* Often, during translation we need to read a value from a flow ('FLOW') and
 * unwildcard the corresponding bits in the wildcards ('WC').  This macro
 * makes it easier to do that. */

#define FLOW_WC_GET_AND_MASK_WC(FLOW, WC, FIELD) \
    (((WC) ? WC_MASK_FIELD(WC, FIELD) : NULL), ((FLOW)->FIELD))

static inline bool is_ethernet(const struct flow *flow,
                               struct flow_wildcards *wc)
{
    if (wc) {
        WC_MASK_FIELD(wc, packet_type);
    }
    return flow->packet_type == htonl(PT_ETH);
}

static inline ovs_be16 get_dl_type(const struct flow *flow)
{
    if (flow->packet_type == htonl(PT_ETH)) {
        return flow->dl_type;
    } else if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE) {
        return pt_ns_type_be(flow->packet_type);
    } else {
        return htons(FLOW_DL_TYPE_NONE);
    }
}

static inline bool is_vlan(const struct flow *flow,
                           struct flow_wildcards *wc)
{
    if (!is_ethernet(flow, wc)) {
        return false;
    }
    if (wc) {
        WC_MASK_FIELD_MASK(wc, vlans[0].tci, htons(VLAN_CFI));
    }
    return (flow->vlans[0].tci & htons(VLAN_CFI)) != 0;
}

static inline bool is_ip_any(const struct flow *flow)
{
    return dl_type_is_ip_any(get_dl_type(flow));
}

static inline bool is_ip_proto(const struct flow *flow, uint8_t ip_proto,
                               struct flow_wildcards *wc)
{
    if (is_ip_any(flow)) {
        if (wc) {
            WC_MASK_FIELD(wc, nw_proto);
        }
        return flow->nw_proto == ip_proto;
    }
    return false;
}

static inline bool is_tcp(const struct flow *flow,
                          struct flow_wildcards *wc)
{
    return is_ip_proto(flow, IPPROTO_TCP, wc);
}

static inline bool is_udp(const struct flow *flow,
                          struct flow_wildcards *wc)
{
    return is_ip_proto(flow, IPPROTO_UDP, wc);
}

static inline bool is_sctp(const struct flow *flow,
                           struct flow_wildcards *wc)
{
    return is_ip_proto(flow, IPPROTO_SCTP, wc);
}

static inline bool is_icmpv4(const struct flow *flow,
                             struct flow_wildcards *wc)
{
    if (get_dl_type(flow) == htons(ETH_TYPE_IP)) {
        if (wc) {
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
        }
        return flow->nw_proto == IPPROTO_ICMP;
    }
    return false;
}

static inline bool is_icmpv6(const struct flow *flow,
                             struct flow_wildcards *wc)
{
    if (get_dl_type(flow) == htons(ETH_TYPE_IPV6)) {
        if (wc) {
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
        }
        return flow->nw_proto == IPPROTO_ICMPV6;
    }
    return false;
}

static inline bool is_nd(const struct flow *flow,
                         struct flow_wildcards *wc)
{
    if (is_icmpv6(flow, wc)) {
        if (wc) {
            memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
        }
        if (flow->tp_dst != htons(0)) {
            return false;
        }

        if (wc) {
            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        }
        return (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
                flow->tp_src == htons(ND_NEIGHBOR_ADVERT));
    }
    return false;
}

static inline bool is_arp(const struct flow *flow)
{
    return (flow->dl_type == htons(ETH_TYPE_ARP));
}

static inline bool is_garp(const struct flow *flow,
                           struct flow_wildcards *wc)
{
    if (is_arp(flow)) {
        return (FLOW_WC_GET_AND_MASK_WC(flow, wc, nw_src) ==
                FLOW_WC_GET_AND_MASK_WC(flow, wc, nw_dst));
    }

    return false;
}

static inline bool is_igmp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (get_dl_type(flow) == htons(ETH_TYPE_IP)) {
        if (wc) {
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
        }
        return flow->nw_proto == IPPROTO_IGMP;
    }
    return false;
}

static inline bool is_mld(const struct flow *flow,
                          struct flow_wildcards *wc)
{
    if (is_icmpv6(flow, wc)) {
        if (wc) {
            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        }
        return (flow->tp_src == htons(MLD_QUERY)
                || flow->tp_src == htons(MLD_REPORT)
                || flow->tp_src == htons(MLD_DONE)
                || flow->tp_src == htons(MLD2_REPORT));
    }
    return false;
}

static inline bool is_mld_query(const struct flow *flow,
                                struct flow_wildcards *wc)
{
    if (is_icmpv6(flow, wc)) {
        if (wc) {
            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        }
        return flow->tp_src == htons(MLD_QUERY);
    }
    return false;
}

static inline bool is_mld_report(const struct flow *flow,
                                 struct flow_wildcards *wc)
{
    return is_mld(flow, wc) && !is_mld_query(flow, wc);
}

static inline bool is_stp(const struct flow *flow)
{
    return (flow->dl_type == htons(FLOW_DL_TYPE_NONE)
            && eth_addr_equals(flow->dl_dst, eth_addr_stp));
}

/* Returns true if flow->tp_dst equals 'port'.  If 'wc' is nonnull, sets
 * appropriate bits in wc->masks.tp_dst to account for the test.
 *
 * The caller must already have ensured that 'flow' is a protocol for which
 * tp_dst is relevant. */
static inline bool tp_dst_equals(const struct flow *flow, uint16_t port,
                                 struct flow_wildcards *wc)
{
    uint16_t diff = port ^ ntohs(flow->tp_dst);
    if (wc) {
        if (diff) {
            /* Set mask for the most significant mismatching bit. */
            int ofs = raw_clz64((uint64_t) diff << 48); /* range [0,15] */
            wc->masks.tp_dst |= htons(0x8000 >> ofs);
        } else {
            /* Must match all bits. */
            wc->masks.tp_dst = OVS_BE16_MAX;
        }
    }
    return !diff;
}

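/* Illustrative usage sketch, not part of the upstream header: combine the
 * predicates above so that both the IP protocol and the destination port
 * are unwildcarded while testing for VXLAN's well-known UDP port 4789. */
static inline bool
flow_is_vxlan_example(const struct flow *flow, struct flow_wildcards *wc)
{
    return is_udp(flow, wc) && tp_dst_equals(flow, 4789, wc);
}
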
#endif /* flow.h */