]> git.proxmox.com Git - mirror_ovs.git/blob - lib/flow.h
ovs-vswitchd: Better document that ovs-vswitchd manages its own datapaths.
[mirror_ovs.git] / lib / flow.h
1 /*
2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #ifndef FLOW_H
17 #define FLOW_H 1
18
19 #include <sys/types.h>
20 #include <netinet/in.h>
21 #include <netinet/icmp6.h>
22 #include <stdbool.h>
23 #include <stdint.h>
24 #include <string.h>
25 #include "bitmap.h"
26 #include "byte-order.h"
27 #include "openvswitch/compiler.h"
28 #include "openflow/nicira-ext.h"
29 #include "openflow/openflow.h"
30 #include "openvswitch/flow.h"
31 #include "packets.h"
32 #include "hash.h"
33 #include "util.h"
34
35 struct dpif_flow_stats;
36 struct ds;
37 struct flow_wildcards;
38 struct minimask;
39 struct dp_packet;
40 struct ofputil_port_map;
41 struct pkt_metadata;
42 struct match;
43
/* Some flow fields are mutually exclusive or only appear within the flow
 * pipeline.  IPv6 headers are bigger than IPv4 and MPLS, and IPv6 ND packets
 * are bigger than TCP,UDP and IGMP packets.
 *
 * Upper bound on the number of 64-bit units that a flow extracted from a
 * packet can occupy: the full flow minus members that are never set from
 * packet data (regs, metadata) and minus the smaller of each pair of
 * mutually exclusive header layouts. */
#define FLOW_MAX_PACKET_U64S (FLOW_U64S                                  \
    /* Unused in datapath */ - FLOW_U64_SIZE(regs)                       \
                             - FLOW_U64_SIZE(metadata)                   \
    /* L2.5/3 */             - FLOW_U64_SIZE(nw_src)  /* incl. nw_dst */ \
                             - FLOW_U64_SIZE(mpls_lse)                   \
    /* L4 */                 - FLOW_U64_SIZE(tp_src)                     \
    )
54
/* Sizes, in 64-bit units, of consecutive segments of struct flow (defined
 * elsewhere in the library). */
extern const uint8_t flow_segment_u64s[];

/* Configured maximum VLAN headers. */
extern int flow_vlan_limit;

/* Index of the 64-bit unit of "struct flow" that contains 'FIELD'. */
#define FLOW_U64_OFFSET(FIELD)                          \
    (offsetof(struct flow, FIELD) / sizeof(uint64_t))
/* Byte offset of 'FIELD' within its 64-bit unit. */
#define FLOW_U64_OFFREM(FIELD)                          \
    (offsetof(struct flow, FIELD) % sizeof(uint64_t))

/* Number of 64-bit units spanned by a 'FIELD'. */
#define FLOW_U64_SIZE(FIELD)                                            \
    DIV_ROUND_UP(FLOW_U64_OFFREM(FIELD) + MEMBER_SIZEOF(struct flow, FIELD), \
                 sizeof(uint64_t))
69
70 void flow_extract(struct dp_packet *, struct flow *);
71
72 void flow_zero_wildcards(struct flow *, const struct flow_wildcards *);
73 void flow_unwildcard_tp_ports(const struct flow *, struct flow_wildcards *);
74 void flow_get_metadata(const struct flow *, struct match *flow_metadata);
75
76 const char *ct_state_to_string(uint32_t state);
77 uint32_t ct_state_from_string(const char *);
78 bool parse_ct_state(const char *state_str, uint32_t default_state,
79 uint32_t *ct_state, struct ds *);
80 bool validate_ct_state(uint32_t state, struct ds *);
81 void flow_clear_conntrack(struct flow *);
82
83 char *flow_to_string(const struct flow *, const struct ofputil_port_map *);
84 void format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
85 uint32_t flags, char del);
86 void format_flags_masked(struct ds *ds, const char *name,
87 const char *(*bit_to_string)(uint32_t),
88 uint32_t flags, uint32_t mask, uint32_t max_mask);
89 void format_packet_type_masked(struct ds *, ovs_be32 value, ovs_be32 mask);
90 int parse_flags(const char *s, const char *(*bit_to_string)(uint32_t),
91 char end, const char *field_name, char **res_string,
92 uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask);
93
94 void flow_format(struct ds *, const struct flow *,
95 const struct ofputil_port_map *);
96 void flow_print(FILE *, const struct flow *, const struct ofputil_port_map *);
97 static inline int flow_compare_3way(const struct flow *, const struct flow *);
98 static inline bool flow_equal(const struct flow *, const struct flow *);
99 static inline size_t flow_hash(const struct flow *, uint32_t basis);
100
101 void flow_set_dl_vlan(struct flow *, ovs_be16 vid);
102 void flow_fix_vlan_tpid(struct flow *);
103 void flow_set_vlan_vid(struct flow *, ovs_be16 vid);
104 void flow_set_vlan_pcp(struct flow *, uint8_t pcp);
105
106 void flow_limit_vlans(int vlan_limit);
107 int flow_count_vlan_headers(const struct flow *);
108 void flow_skip_common_vlan_headers(const struct flow *a, int *p_an,
109 const struct flow *b, int *p_bn);
110 void flow_pop_vlan(struct flow*, struct flow_wildcards*);
111 void flow_push_vlan_uninit(struct flow*, struct flow_wildcards*);
112
113 int flow_count_mpls_labels(const struct flow *, struct flow_wildcards *);
114 int flow_count_common_mpls_labels(const struct flow *a, int an,
115 const struct flow *b, int bn,
116 struct flow_wildcards *wc);
117 void flow_push_mpls(struct flow *, int n, ovs_be16 mpls_eth_type,
118 struct flow_wildcards *, bool clear_flow_L3);
119 bool flow_pop_mpls(struct flow *, int n, ovs_be16 eth_type,
120 struct flow_wildcards *);
121 void flow_set_mpls_label(struct flow *, int idx, ovs_be32 label);
122 void flow_set_mpls_ttl(struct flow *, int idx, uint8_t ttl);
123 void flow_set_mpls_tc(struct flow *, int idx, uint8_t tc);
124 void flow_set_mpls_bos(struct flow *, int idx, uint8_t stack);
125 void flow_set_mpls_lse(struct flow *, int idx, ovs_be32 lse);
126
127 void flow_compose(struct dp_packet *, const struct flow *,
128 const void *l7, size_t l7_len);
129 void packet_expand(struct dp_packet *, const struct flow *, size_t size);
130
131 bool parse_ipv6_ext_hdrs(const void **datap, size_t *sizep, uint8_t *nw_proto,
132 uint8_t *nw_frag);
133 ovs_be16 parse_dl_type(const struct eth_header *data_, size_t size);
134 bool parse_nsh(const void **datap, size_t *sizep, struct ovs_key_nsh *key);
135
136 static inline uint64_t
137 flow_get_xreg(const struct flow *flow, int idx)
138 {
139 return ((uint64_t) flow->regs[idx * 2] << 32) | flow->regs[idx * 2 + 1];
140 }
141
142 static inline void
143 flow_set_xreg(struct flow *flow, int idx, uint64_t value)
144 {
145 flow->regs[idx * 2] = value >> 32;
146 flow->regs[idx * 2 + 1] = value;
147 }
148
149 static inline ovs_u128
150 flow_get_xxreg(const struct flow *flow, int idx)
151 {
152 ovs_u128 value;
153
154 value.u64.hi = (uint64_t) flow->regs[idx * 4] << 32;
155 value.u64.hi |= flow->regs[idx * 4 + 1];
156 value.u64.lo = (uint64_t) flow->regs[idx * 4 + 2] << 32;
157 value.u64.lo |= flow->regs[idx * 4 + 3];
158
159 return value;
160 }
161
162 static inline void
163 flow_set_xxreg(struct flow *flow, int idx, ovs_u128 value)
164 {
165 flow->regs[idx * 4] = value.u64.hi >> 32;
166 flow->regs[idx * 4 + 1] = value.u64.hi;
167 flow->regs[idx * 4 + 2] = value.u64.lo >> 32;
168 flow->regs[idx * 4 + 3] = value.u64.lo;
169 }
170
/* Compares 'a' and 'b' bytewise, returning negative, zero, or positive in
 * memcmp() fashion.  NOTE(review): this assumes "struct flow" contains no
 * uninitialized padding between members -- confirm against the struct
 * definition when it changes. */
static inline int
flow_compare_3way(const struct flow *a, const struct flow *b)
{
    return memcmp(a, b, sizeof *a);
}
176
/* Returns true if 'a' and 'b' are byte-for-byte identical. */
static inline bool
flow_equal(const struct flow *a, const struct flow *b)
{
    return flow_compare_3way(a, b) == 0;
}
182
/* Hashes the whole of 'flow', viewed as a sequence of 64-bit words, with the
 * given 'basis'. */
static inline size_t
flow_hash(const struct flow *flow, uint32_t basis)
{
    return hash_bytes64((const uint64_t *)flow, sizeof *flow, basis);
}
188
/* Returns the underlying uint16_t of OpenFlow port number 'ofp_port'. */
static inline uint16_t
ofp_to_u16(ofp_port_t ofp_port)
{
    return (OVS_FORCE uint16_t) ofp_port;
}

/* Returns the underlying uint32_t of datapath port number 'odp_port'. */
static inline uint32_t
odp_to_u32(odp_port_t odp_port)
{
    return (OVS_FORCE uint32_t) odp_port;
}

/* Returns the underlying uint32_t of OpenFlow 1.1+ port number
 * 'ofp11_port'. */
static inline uint32_t
ofp11_to_u32(ofp11_port_t ofp11_port)
{
    return (OVS_FORCE uint32_t) ofp11_port;
}

/* Converts plain uint16_t 'port' into an OpenFlow port number. */
static inline ofp_port_t
u16_to_ofp(uint16_t port)
{
    return OFP_PORT_C(port);
}

/* Converts plain uint32_t 'port' into a datapath port number. */
static inline odp_port_t
u32_to_odp(uint32_t port)
{
    return ODP_PORT_C(port);
}

/* Converts plain uint32_t 'port' into an OpenFlow 1.1+ port number. */
static inline ofp11_port_t
u32_to_ofp11(uint32_t port)
{
    return OFP11_PORT_C(port);
}

/* Hashes OpenFlow port number 'ofp_port', with basis 0. */
static inline uint32_t
hash_ofp_port(ofp_port_t ofp_port)
{
    return hash_int(ofp_to_u16(ofp_port), 0);
}

/* Hashes datapath port number 'odp_port', with basis 0. */
static inline uint32_t
hash_odp_port(odp_port_t odp_port)
{
    return hash_int(odp_to_u32(odp_port), 0);
}
236 \f
237 uint32_t flow_hash_5tuple(const struct flow *flow, uint32_t basis);
238 uint32_t flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis);
239 uint32_t flow_hash_symmetric_l3l4(const struct flow *flow, uint32_t basis,
240 bool inc_udp_ports );
241
242 /* Initialize a flow with random fields that matter for nx_hash_fields. */
243 void flow_random_hash_fields(struct flow *);
244 void flow_mask_hash_fields(const struct flow *, struct flow_wildcards *,
245 enum nx_hash_fields);
246 uint32_t flow_hash_fields(const struct flow *, enum nx_hash_fields,
247 uint16_t basis);
248 const char *flow_hash_fields_to_str(enum nx_hash_fields);
249 bool flow_hash_fields_valid(enum nx_hash_fields);
250
251 uint32_t flow_hash_in_wildcards(const struct flow *,
252 const struct flow_wildcards *,
253 uint32_t basis);
254
255 bool flow_equal_except(const struct flow *a, const struct flow *b,
256 const struct flow_wildcards *);
257 \f
/* Bitmap for flow values.  For each 1-bit the corresponding flow value is
 * explicitly specified, other values are zeroes.
 *
 * map_t must be wide enough to hold any member of struct flow. */
typedef unsigned long long map_t;
#define MAP_T_BITS (sizeof(map_t) * CHAR_BIT)  /* Bits per map unit. */
#define MAP_1 (map_t)1                         /* One bit at map_t width. */
#define MAP_MAX TYPE_MAXIMUM(map_t)            /* All-ones map unit. */

/* Nonzero iff bit 'IDX' is set in map unit 'MAP'. */
#define MAP_IS_SET(MAP, IDX) ((MAP) & (MAP_1 << (IDX)))

/* Iterate through the indices of all 1-bits in 'MAP'. */
#define MAP_FOR_EACH_INDEX(IDX, MAP)            \
    ULLONG_FOR_EACH_1(IDX, MAP)

/* Number of map units needed to cover every u64 of struct flow. */
#define FLOWMAP_UNITS DIV_ROUND_UP(FLOW_U64S, MAP_T_BITS)

/* One bit per 64-bit unit of struct flow. */
struct flowmap {
    map_t bits[FLOWMAP_UNITS];
};

#define FLOWMAP_EMPTY_INITIALIZER { { 0 } }
280
281 static inline void flowmap_init(struct flowmap *);
282 static inline bool flowmap_equal(struct flowmap, struct flowmap);
283 static inline bool flowmap_is_set(const struct flowmap *, size_t idx);
284 static inline bool flowmap_are_set(const struct flowmap *, size_t idx,
285 unsigned int n_bits);
286 static inline void flowmap_set(struct flowmap *, size_t idx,
287 unsigned int n_bits);
288 static inline void flowmap_clear(struct flowmap *, size_t idx,
289 unsigned int n_bits);
290 static inline struct flowmap flowmap_or(struct flowmap, struct flowmap);
291 static inline struct flowmap flowmap_and(struct flowmap, struct flowmap);
292 static inline bool flowmap_is_empty(struct flowmap);
293 static inline unsigned int flowmap_n_1bits(struct flowmap);
294
295 #define FLOWMAP_HAS_FIELD(FM, FIELD) \
296 flowmap_are_set(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))
297
298 #define FLOWMAP_SET(FM, FIELD) \
299 flowmap_set(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))
300
301 #define FLOWMAP_SET__(FM, FIELD, SIZE) \
302 flowmap_set(FM, FLOW_U64_OFFSET(FIELD), \
303 DIV_ROUND_UP(SIZE, sizeof(uint64_t)))
304
305 /* XXX: Only works for full 64-bit units. */
306 #define FLOWMAP_CLEAR(FM, FIELD) \
307 BUILD_ASSERT_DECL(FLOW_U64_OFFREM(FIELD) == 0); \
308 BUILD_ASSERT_DECL(sizeof(((struct flow *)0)->FIELD) % sizeof(uint64_t) == 0); \
309 flowmap_clear(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))
310
311 /* Iterate through all units in 'FMAP'. */
312 #define FLOWMAP_FOR_EACH_UNIT(UNIT) \
313 for ((UNIT) = 0; (UNIT) < FLOWMAP_UNITS; (UNIT)++)
314
315 /* Iterate through all map units in 'FMAP'. */
316 #define FLOWMAP_FOR_EACH_MAP(MAP, FLOWMAP) \
317 for (size_t unit__ = 0; \
318 unit__ < FLOWMAP_UNITS && ((MAP) = (FLOWMAP).bits[unit__], true); \
319 unit__++)
320
321 struct flowmap_aux;
322 static inline bool flowmap_next_index(struct flowmap_aux *, size_t *idx);
323
324 #define FLOWMAP_AUX_INITIALIZER(FLOWMAP) { .unit = 0, .map = (FLOWMAP) }
325
326 /* Iterate through all struct flow u64 indices specified by 'MAP'. This is a
327 * slower but easier version of the FLOWMAP_FOR_EACH_MAP() &
328 * MAP_FOR_EACH_INDEX() combination. */
329 #define FLOWMAP_FOR_EACH_INDEX(IDX, MAP) \
330 for (struct flowmap_aux aux__ = FLOWMAP_AUX_INITIALIZER(MAP); \
331 flowmap_next_index(&aux__, &(IDX));)
332
333 /* Flowmap inline implementations. */
334 static inline void
335 flowmap_init(struct flowmap *fm)
336 {
337 memset(fm, 0, sizeof *fm);
338 }
339
340 static inline bool
341 flowmap_equal(struct flowmap a, struct flowmap b)
342 {
343 return !memcmp(&a, &b, sizeof a);
344 }
345
346 static inline bool
347 flowmap_is_set(const struct flowmap *fm, size_t idx)
348 {
349 return (fm->bits[idx / MAP_T_BITS] & (MAP_1 << (idx % MAP_T_BITS))) != 0;
350 }
351
352 /* Returns 'true' if any of the 'n_bits' bits starting at 'idx' are set in
353 * 'fm'. 'n_bits' can be at most MAP_T_BITS. */
354 static inline bool
355 flowmap_are_set(const struct flowmap *fm, size_t idx, unsigned int n_bits)
356 {
357 map_t n_bits_mask = (MAP_1 << n_bits) - 1;
358 size_t unit = idx / MAP_T_BITS;
359
360 idx %= MAP_T_BITS;
361
362 if (fm->bits[unit] & (n_bits_mask << idx)) {
363 return true;
364 }
365 /* The seemingly unnecessary bounds check on 'unit' is a workaround for a
366 * false-positive array out of bounds error by GCC 4.9. */
367 if (unit + 1 < FLOWMAP_UNITS && idx + n_bits > MAP_T_BITS) {
368 /* Check the remaining bits from the next unit. */
369 return fm->bits[unit + 1] & (n_bits_mask >> (MAP_T_BITS - idx));
370 }
371 return false;
372 }
373
374 /* Set the 'n_bits' consecutive bits in 'fm', starting at bit 'idx'.
375 * 'n_bits' can be at most MAP_T_BITS. */
376 static inline void
377 flowmap_set(struct flowmap *fm, size_t idx, unsigned int n_bits)
378 {
379 map_t n_bits_mask = (MAP_1 << n_bits) - 1;
380 size_t unit = idx / MAP_T_BITS;
381
382 idx %= MAP_T_BITS;
383
384 fm->bits[unit] |= n_bits_mask << idx;
385 /* The seemingly unnecessary bounds check on 'unit' is a workaround for a
386 * false-positive array out of bounds error by GCC 4.9. */
387 if (unit + 1 < FLOWMAP_UNITS && idx + n_bits > MAP_T_BITS) {
388 /* 'MAP_T_BITS - idx' bits were set on 'unit', set the remaining
389 * bits from the next unit. */
390 fm->bits[unit + 1] |= n_bits_mask >> (MAP_T_BITS - idx);
391 }
392 }
393
394 /* Clears the 'n_bits' consecutive bits in 'fm', starting at bit 'idx'.
395 * 'n_bits' can be at most MAP_T_BITS. */
396 static inline void
397 flowmap_clear(struct flowmap *fm, size_t idx, unsigned int n_bits)
398 {
399 map_t n_bits_mask = (MAP_1 << n_bits) - 1;
400 size_t unit = idx / MAP_T_BITS;
401
402 idx %= MAP_T_BITS;
403
404 fm->bits[unit] &= ~(n_bits_mask << idx);
405 /* The seemingly unnecessary bounds check on 'unit' is a workaround for a
406 * false-positive array out of bounds error by GCC 4.9. */
407 if (unit + 1 < FLOWMAP_UNITS && idx + n_bits > MAP_T_BITS) {
408 /* 'MAP_T_BITS - idx' bits were cleared on 'unit', clear the
409 * remaining bits from the next unit. */
410 fm->bits[unit + 1] &= ~(n_bits_mask >> (MAP_T_BITS - idx));
411 }
412 }
413
414 /* OR the bits in the flowmaps. */
415 static inline struct flowmap
416 flowmap_or(struct flowmap a, struct flowmap b)
417 {
418 struct flowmap map;
419 size_t unit;
420
421 FLOWMAP_FOR_EACH_UNIT (unit) {
422 map.bits[unit] = a.bits[unit] | b.bits[unit];
423 }
424 return map;
425 }
426
427 /* AND the bits in the flowmaps. */
428 static inline struct flowmap
429 flowmap_and(struct flowmap a, struct flowmap b)
430 {
431 struct flowmap map;
432 size_t unit;
433
434 FLOWMAP_FOR_EACH_UNIT (unit) {
435 map.bits[unit] = a.bits[unit] & b.bits[unit];
436 }
437 return map;
438 }
439
440 static inline bool
441 flowmap_is_empty(struct flowmap fm)
442 {
443 map_t map;
444
445 FLOWMAP_FOR_EACH_MAP (map, fm) {
446 if (map) {
447 return false;
448 }
449 }
450 return true;
451 }
452
453 static inline unsigned int
454 flowmap_n_1bits(struct flowmap fm)
455 {
456 unsigned int n_1bits = 0;
457 size_t unit;
458
459 FLOWMAP_FOR_EACH_UNIT (unit) {
460 n_1bits += count_1bits(fm.bits[unit]);
461 }
462 return n_1bits;
463 }
464
/* Iteration state for flowmap_next_index(): 'map' holds the bits not yet
 * visited and 'unit' is the map unit currently being scanned. */
struct flowmap_aux {
    size_t unit;        /* Current unit within 'map'. */
    struct flowmap map; /* Remaining (unvisited) 1-bits. */
};
469
470 static inline bool
471 flowmap_next_index(struct flowmap_aux *aux, size_t *idx)
472 {
473 for (;;) {
474 map_t *map = &aux->map.bits[aux->unit];
475 if (*map) {
476 *idx = aux->unit * MAP_T_BITS + raw_ctz(*map);
477 *map = zero_rightmost_1bit(*map);
478 return true;
479 }
480 if (++aux->unit >= FLOWMAP_UNITS) {
481 return false;
482 }
483 }
484 }
485
486 \f
487 /* Compressed flow. */
488
489 /* A sparse representation of a "struct flow".
490 *
491 * A "struct flow" is fairly large and tends to be mostly zeros. Sparse
492 * representation has two advantages. First, it saves memory and, more
493 * importantly, minimizes the number of accessed cache lines. Second, it saves
494 * time when the goal is to iterate over only the nonzero parts of the struct.
495 *
496 * The map member hold one bit for each uint64_t in a "struct flow". Each
497 * 0-bit indicates that the corresponding uint64_t is zero, each 1-bit that it
498 * *may* be nonzero (see below how this applies to minimasks).
499 *
500 * The values indicated by 'map' always follow the miniflow in memory. The
501 * user of the miniflow is responsible for always having enough storage after
502 * the struct miniflow corresponding to the number of 1-bits in maps.
503 *
504 * Elements in values array are allowed to be zero. This is useful for "struct
505 * minimatch", for which ensuring that the miniflow and minimask members have
506 * same maps allows optimization. This allowance applies only to a miniflow
507 * that is not a mask. That is, a minimask may NOT have zero elements in its
508 * values.
509 *
510 * A miniflow is always dynamically allocated so that the maps are followed by
511 * at least as many elements as there are 1-bits in maps. */
512 struct miniflow {
513 struct flowmap map;
514 /* Followed by:
515 * uint64_t values[n];
516 * where 'n' is miniflow_n_values(miniflow). */
517 };
518 BUILD_ASSERT_DECL(sizeof(struct miniflow) % sizeof(uint64_t) == 0);
519
520 #define MINIFLOW_VALUES_SIZE(COUNT) ((COUNT) * sizeof(uint64_t))
521
522 static inline uint64_t *miniflow_values(struct miniflow *mf)
523 {
524 return (uint64_t *)(mf + 1);
525 }
526
527 static inline const uint64_t *miniflow_get_values(const struct miniflow *mf)
528 {
529 return (const uint64_t *)(mf + 1);
530 }
531
532 struct pkt_metadata;
533
534 /* The 'dst' must follow with buffer space for FLOW_U64S 64-bit units.
535 * 'dst->map' is ignored on input and set on output to indicate which fields
536 * were extracted. */
537 void miniflow_extract(struct dp_packet *packet, struct miniflow *dst);
538 void miniflow_map_init(struct miniflow *, const struct flow *);
539 void flow_wc_map(const struct flow *, struct flowmap *);
540 size_t miniflow_alloc(struct miniflow *dsts[], size_t n,
541 const struct miniflow *src);
542 void miniflow_init(struct miniflow *, const struct flow *);
543 void miniflow_clone(struct miniflow *, const struct miniflow *,
544 size_t n_values);
545 struct miniflow * miniflow_create(const struct flow *);
546
547 void miniflow_expand(const struct miniflow *, struct flow *);
548
/* Returns the 'index'th 64-bit word of 'flow' viewed as a uint64_t array. */
static inline uint64_t flow_u64_value(const struct flow *flow, size_t index)
{
    const uint64_t *words = (const uint64_t *) flow;

    return words[index];
}

/* Returns a writable pointer to the 'index'th 64-bit word of 'flow'. */
static inline uint64_t *flow_u64_lvalue(struct flow *flow, size_t index)
{
    uint64_t *words = (uint64_t *) flow;

    return words + index;
}
558
/* Returns the number of 64-bit values stored after 'flow', i.e. the number
 * of 1-bits in its map. */
static inline size_t
miniflow_n_values(const struct miniflow *flow)
{
    return flowmap_n_1bits(flow->map);
}
564
/* Iteration state for flow_values_get_next_in_maps(): the flow being read
 * plus the position within the maps being traversed. */
struct flow_for_each_in_maps_aux {
    const struct flow *flow;     /* Flow whose u64 words are read. */
    struct flowmap_aux map_aux;  /* Remaining indices to visit. */
};
569
570 static inline bool
571 flow_values_get_next_in_maps(struct flow_for_each_in_maps_aux *aux,
572 uint64_t *value)
573 {
574 size_t idx;
575
576 if (flowmap_next_index(&aux->map_aux, &idx)) {
577 *value = flow_u64_value(aux->flow, idx);
578 return true;
579 }
580 return false;
581 }
582
583 /* Iterate through all flow u64 values specified by 'MAPS'. */
584 #define FLOW_FOR_EACH_IN_MAPS(VALUE, FLOW, MAPS) \
585 for (struct flow_for_each_in_maps_aux aux__ \
586 = { (FLOW), FLOWMAP_AUX_INITIALIZER(MAPS) }; \
587 flow_values_get_next_in_maps(&aux__, &(VALUE));)
588
/* Iteration state for mf_get_next_in_map(). */
struct mf_for_each_in_map_aux {
    size_t unit;             /* Current 64-bit unit of the flowmaps
                                being processed. */
    struct flowmap fmap;     /* Remaining 1-bits corresponding to the
                                64-bit words in 'values'. */
    struct flowmap map;      /* Remaining 1-bits corresponding to the
                                64-bit words of interest. */
    const uint64_t *values;  /* 64-bit words corresponding to the
                                1-bits in 'fmap'. */
};
599
/* Get the data from 'aux->values' corresponding to the next lowest 1-bit
 * in 'aux->map', given that 'aux->values' points to an array of 64-bit
 * words corresponding to the 1-bits in 'aux->fmap', starting from the
 * rightmost 1-bit.
 *
 * Returns 'true' if the traversal is incomplete, 'false' otherwise.
 * 'aux' is prepared for the next iteration after each call.
 *
 * This is used to traverse through, for example, the values in a miniflow
 * representation of a flow key selected by non-zero 64-bit words in a
 * corresponding subtable mask. */
static inline bool
mf_get_next_in_map(struct mf_for_each_in_map_aux *aux,
                   uint64_t *value)
{
    map_t *map, *fmap;
    map_t rm1bit;

    /* Skip empty map units. */
    while (OVS_UNLIKELY(!*(map = &aux->map.bits[aux->unit]))) {
        /* Skip remaining data in the current unit before advancing
         * to the next. */
        aux->values += count_1bits(aux->fmap.bits[aux->unit]);
        if (++aux->unit == FLOWMAP_UNITS) {
            return false;
        }
    }

    rm1bit = rightmost_1bit(*map);
    *map -= rm1bit;               /* Consume this bit of interest. */
    fmap = &aux->fmap.bits[aux->unit];

    /* If the rightmost 1-bit found from the current unit in 'aux->map'
     * ('rm1bit') is also present in 'aux->fmap', store the corresponding
     * value from 'aux->values' to '*value', otherwise store 0. */
    if (OVS_LIKELY(*fmap & rm1bit)) {
        /* Skip all 64-bit words in 'values' preceding the one corresponding
         * to 'rm1bit'. */
        map_t trash = *fmap & (rm1bit - 1);

        /* Avoid resetting 'fmap' and calling count_1bits() when trash is
         * zero. */
        if (trash) {
            *fmap -= trash;
            aux->values += count_1bits(trash);
        }

        *value = *aux->values;
    } else {
        *value = 0;
    }
    return true;
}
653
654 /* Iterate through miniflow u64 values specified by 'FLOWMAP'. */
655 #define MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, FLOW, FLOWMAP) \
656 for (struct mf_for_each_in_map_aux aux__ = \
657 { 0, (FLOW)->map, (FLOWMAP), miniflow_get_values(FLOW) }; \
658 mf_get_next_in_map(&aux__, &(VALUE));)
659
660 /* This can be used when it is known that 'idx' is set in 'map'. */
661 static inline const uint64_t *
662 miniflow_values_get__(const uint64_t *values, map_t map, size_t idx)
663 {
664 return values + count_1bits(map & ((MAP_1 << idx) - 1));
665 }
666
667 /* This can be used when it is known that 'u64_idx' is set in
668 * the map of 'mf'. */
669 static inline const uint64_t *
670 miniflow_get__(const struct miniflow *mf, size_t idx)
671 {
672 const uint64_t *values = miniflow_get_values(mf);
673 const map_t *map = mf->map.bits;
674
675 while (idx >= MAP_T_BITS) {
676 idx -= MAP_T_BITS;
677 values += count_1bits(*map++);
678 }
679 return miniflow_values_get__(values, *map, idx);
680 }
681
682 #define MINIFLOW_IN_MAP(MF, IDX) flowmap_is_set(&(MF)->map, IDX)
683
684 /* Get the value of the struct flow 'FIELD' as up to 8 byte wide integer type
685 * 'TYPE' from miniflow 'MF'. */
686 #define MINIFLOW_GET_TYPE(MF, TYPE, FIELD) \
687 (BUILD_ASSERT(sizeof(TYPE) == sizeof(((struct flow *)0)->FIELD)), \
688 BUILD_ASSERT_GCCONLY(__builtin_types_compatible_p(TYPE, typeof(((struct flow *)0)->FIELD))), \
689 MINIFLOW_GET_TYPE__(MF, TYPE, FIELD))
690
691 /* Like MINIFLOW_GET_TYPE, but without checking that TYPE is the correct width
692 * for FIELD. (This is useful for deliberately reading adjacent fields in one
693 * go.) */
694 #define MINIFLOW_GET_TYPE__(MF, TYPE, FIELD) \
695 (MINIFLOW_IN_MAP(MF, FLOW_U64_OFFSET(FIELD)) \
696 ? ((OVS_FORCE const TYPE *)miniflow_get__(MF, FLOW_U64_OFFSET(FIELD))) \
697 [FLOW_U64_OFFREM(FIELD) / sizeof(TYPE)] \
698 : 0)
699
700 #define MINIFLOW_GET_U128(FLOW, FIELD) \
701 (ovs_u128) { .u64 = { \
702 (MINIFLOW_IN_MAP(FLOW, FLOW_U64_OFFSET(FIELD)) ? \
703 *miniflow_get__(FLOW, FLOW_U64_OFFSET(FIELD)) : 0), \
704 (MINIFLOW_IN_MAP(FLOW, FLOW_U64_OFFSET(FIELD) + 1) ? \
705 *miniflow_get__(FLOW, FLOW_U64_OFFSET(FIELD) + 1) : 0) } }
706
707 #define MINIFLOW_GET_U8(FLOW, FIELD) \
708 MINIFLOW_GET_TYPE(FLOW, uint8_t, FIELD)
709 #define MINIFLOW_GET_U16(FLOW, FIELD) \
710 MINIFLOW_GET_TYPE(FLOW, uint16_t, FIELD)
711 #define MINIFLOW_GET_BE16(FLOW, FIELD) \
712 MINIFLOW_GET_TYPE(FLOW, ovs_be16, FIELD)
713 #define MINIFLOW_GET_U32(FLOW, FIELD) \
714 MINIFLOW_GET_TYPE(FLOW, uint32_t, FIELD)
715 #define MINIFLOW_GET_BE32(FLOW, FIELD) \
716 MINIFLOW_GET_TYPE(FLOW, ovs_be32, FIELD)
717 #define MINIFLOW_GET_U64(FLOW, FIELD) \
718 MINIFLOW_GET_TYPE(FLOW, uint64_t, FIELD)
719 #define MINIFLOW_GET_BE64(FLOW, FIELD) \
720 MINIFLOW_GET_TYPE(FLOW, ovs_be64, FIELD)
721
722 static inline uint64_t miniflow_get(const struct miniflow *,
723 unsigned int u64_ofs);
724 static inline uint32_t miniflow_get_u32(const struct miniflow *,
725 unsigned int u32_ofs);
726 static inline ovs_be32 miniflow_get_be32(const struct miniflow *,
727 unsigned int be32_ofs);
728 static inline uint16_t miniflow_get_vid(const struct miniflow *, size_t);
729 static inline uint16_t miniflow_get_tcp_flags(const struct miniflow *);
730 static inline ovs_be64 miniflow_get_metadata(const struct miniflow *);
731 static inline uint64_t miniflow_get_tun_metadata_present_map(
732 const struct miniflow *);
733 static inline uint32_t miniflow_get_recirc_id(const struct miniflow *);
734 static inline uint32_t miniflow_get_dp_hash(const struct miniflow *);
735 static inline ovs_be32 miniflow_get_ports(const struct miniflow *);
736
737 bool miniflow_equal(const struct miniflow *a, const struct miniflow *b);
738 bool miniflow_equal_in_minimask(const struct miniflow *a,
739 const struct miniflow *b,
740 const struct minimask *);
741 bool miniflow_equal_flow_in_minimask(const struct miniflow *a,
742 const struct flow *b,
743 const struct minimask *);
744 uint32_t miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis);
745
746 \f
747 /* Compressed flow wildcards. */
748
749 /* A sparse representation of a "struct flow_wildcards".
750 *
751 * See the large comment on struct miniflow for details.
752 *
753 * Note: While miniflow can have zero data for a 1-bit in the map,
754 * a minimask may not! We rely on this in the implementation. */
755 struct minimask {
756 struct miniflow masks;
757 };
758
759 void minimask_init(struct minimask *, const struct flow_wildcards *);
760 struct minimask * minimask_create(const struct flow_wildcards *);
761 void minimask_combine(struct minimask *dst,
762 const struct minimask *a, const struct minimask *b,
763 uint64_t storage[FLOW_U64S]);
764
765 void minimask_expand(const struct minimask *, struct flow_wildcards *);
766
767 static inline uint32_t minimask_get_u32(const struct minimask *,
768 unsigned int u32_ofs);
769 static inline ovs_be32 minimask_get_be32(const struct minimask *,
770 unsigned int be32_ofs);
771 static inline uint16_t minimask_get_vid_mask(const struct minimask *, size_t);
772 static inline ovs_be64 minimask_get_metadata_mask(const struct minimask *);
773
774 bool minimask_equal(const struct minimask *a, const struct minimask *b);
775 bool minimask_has_extra(const struct minimask *, const struct minimask *);
776
777 \f
778 /* Returns true if 'mask' matches every packet, false if 'mask' fixes any bits
779 * or fields. */
780 static inline bool
781 minimask_is_catchall(const struct minimask *mask)
782 {
783 /* For every 1-bit in mask's map, the corresponding value is non-zero,
784 * so the only way the mask can not fix any bits or fields is for the
785 * map the be zero. */
786 return flowmap_is_empty(mask->masks.map);
787 }
788
789 /* Returns the uint64_t that would be at byte offset '8 * u64_ofs' if 'flow'
790 * were expanded into a "struct flow". */
791 static inline uint64_t miniflow_get(const struct miniflow *flow,
792 unsigned int u64_ofs)
793 {
794 return MINIFLOW_IN_MAP(flow, u64_ofs) ? *miniflow_get__(flow, u64_ofs) : 0;
795 }
796
/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'flow'
 * were expanded into a "struct flow". */
static inline uint32_t miniflow_get_u32(const struct miniflow *flow,
                                        unsigned int u32_ofs)
{
    uint64_t value = miniflow_get(flow, u32_ofs / 2);

    /* Select the half of the containing u64 that holds the requested 32-bit
     * word in host memory order. */
#if WORDS_BIGENDIAN
    return (u32_ofs & 1) ? value : value >> 32;
#else
    return (u32_ofs & 1) ? value >> 32 : value;
#endif
}
808
809 static inline ovs_be32 miniflow_get_be32(const struct miniflow *flow,
810 unsigned int be32_ofs)
811 {
812 return (OVS_FORCE ovs_be32)miniflow_get_u32(flow, be32_ofs);
813 }
814
815 /* Returns the VID within the vlan_tci member of the "struct flow" represented
816 * by 'flow'. */
817 static inline uint16_t
818 miniflow_get_vid(const struct miniflow *flow, size_t n)
819 {
820 if (n < FLOW_MAX_VLAN_HEADERS) {
821 union flow_vlan_hdr hdr = {
822 .qtag = MINIFLOW_GET_BE32(flow, vlans[n].qtag)
823 };
824 return vlan_tci_to_vid(hdr.tci);
825 }
826 return 0;
827 }
828
829 /* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'mask'
830 * were expanded into a "struct flow_wildcards". */
831 static inline uint32_t
832 minimask_get_u32(const struct minimask *mask, unsigned int u32_ofs)
833 {
834 return miniflow_get_u32(&mask->masks, u32_ofs);
835 }
836
837 static inline ovs_be32
838 minimask_get_be32(const struct minimask *mask, unsigned int be32_ofs)
839 {
840 return (OVS_FORCE ovs_be32)minimask_get_u32(mask, be32_ofs);
841 }
842
843 /* Returns the VID mask within the vlan_tci member of the "struct
844 * flow_wildcards" represented by 'mask'. */
845 static inline uint16_t
846 minimask_get_vid_mask(const struct minimask *mask, size_t n)
847 {
848 return miniflow_get_vid(&mask->masks, n);
849 }
850
/* Returns the value of the "tcp_flags" field in 'flow', in host byte
 * order. */
static inline uint16_t
miniflow_get_tcp_flags(const struct miniflow *flow)
{
    return ntohs(MINIFLOW_GET_BE16(flow, tcp_flags));
}

/* Returns the value of the OpenFlow 1.1+ "metadata" field in 'flow'. */
static inline ovs_be64
miniflow_get_metadata(const struct miniflow *flow)
{
    return MINIFLOW_GET_BE64(flow, metadata);
}

/* Returns the bitmap that indicates which tunnel metadata fields are present
 * in 'flow'. */
static inline uint64_t
miniflow_get_tun_metadata_present_map(const struct miniflow *flow)
{
    return MINIFLOW_GET_U64(flow, tunnel.metadata.present.map);
}

/* Returns the recirc_id in 'flow'. */
static inline uint32_t
miniflow_get_recirc_id(const struct miniflow *flow)
{
    return MINIFLOW_GET_U32(flow, recirc_id);
}

/* Returns the dp_hash in 'flow'. */
static inline uint32_t
miniflow_get_dp_hash(const struct miniflow *flow)
{
    return MINIFLOW_GET_U32(flow, dp_hash);
}

/* Returns the 'tp_src' and 'tp_dst' fields together as one piece of data.
 * Uses the width-unchecked macro on purpose to read the two adjacent 16-bit
 * fields as a single 32-bit value. */
static inline ovs_be32
miniflow_get_ports(const struct miniflow *flow)
{
    return MINIFLOW_GET_TYPE__(flow, ovs_be32, tp_src);
}
893
/* Returns the mask for the OpenFlow 1.1+ "metadata" field in 'mask'.
 *
 * The return value is all-1-bits if 'mask' matches on the whole value of the
 * metadata field, all-0-bits if 'mask' entirely wildcards the metadata field,
 * or some other value if the metadata field is partially matched, partially
 * wildcarded.  (A minimask stores its bits as a miniflow, so the miniflow
 * getter applies directly.) */
static inline ovs_be64
minimask_get_metadata_mask(const struct minimask *mask)
{
    return MINIFLOW_GET_BE64(&mask->masks, metadata);
}
905
/* Perform a bitwise OR of miniflow 'src' flow data specified in 'subset' with
 * the equivalent fields in 'dst', storing the result in 'dst'.  'subset' must
 * be a subset of 'src's map.
 *
 * NOTE(review): 'p' advances once per 1-bit of 'subset', i.e. the value array
 * of 'src' is consumed in 'subset' order.  This lines up with 'src's packed
 * values only when 'subset' matches the layout the caller arranged -- confirm
 * at call sites that pass anything other than 'src->map'. */
static inline void
flow_union_with_miniflow_subset(struct flow *dst, const struct miniflow *src,
                                struct flowmap subset)
{
    uint64_t *dst_u64 = (uint64_t *) dst;
    const uint64_t *p = miniflow_get_values(src);
    map_t map;

    FLOWMAP_FOR_EACH_MAP (map, subset) {
        size_t idx;

        MAP_FOR_EACH_INDEX(idx, map) {
            dst_u64[idx] |= *p++;
        }
        /* Next map unit covers the next MAP_T_BITS u64s of 'dst'. */
        dst_u64 += MAP_T_BITS;
    }
}

/* Perform a bitwise OR of miniflow 'src' flow data with the equivalent
 * fields in 'dst', storing the result in 'dst'. */
static inline void
flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
{
    flow_union_with_miniflow_subset(dst, src, src->map);
}
934
/* Returns true if 'flow' is known to be associated with a valid connection
 * tracking entry.
 *
 * Operates in two modes:
 *   - 'mask' nonnull and 'wc' null: checks a match (flow + mask pair).
 *   - otherwise: checks a fully extracted flow, optionally unwildcarding
 *     the relevant ct_state bits in 'wc' (when nonnull). */
static inline bool is_ct_valid(const struct flow *flow,
                               const struct flow_wildcards *mask,
                               struct flow_wildcards *wc)
{
    /* Matches are checked with 'mask' and without 'wc'. */
    if (mask && !wc) {
        /* Must match at least one of the bits that implies a valid
         * conntrack entry, or an explicit not-invalid. */
        return flow->ct_state & (CS_NEW | CS_ESTABLISHED | CS_RELATED
                                 | CS_REPLY_DIR | CS_SRC_NAT | CS_DST_NAT)
            || (flow->ct_state & CS_TRACKED
                && mask->masks.ct_state & CS_INVALID
                && !(flow->ct_state & CS_INVALID));
    }
    /* Else we are checking a fully extracted flow, where valid CT state always
     * has either 'new', 'established', or 'reply_dir' bit set. */
#define CS_VALID_MASK (CS_NEW | CS_ESTABLISHED | CS_REPLY_DIR)
    if (wc) {
        /* Record that the decision below depended on these ct_state bits. */
        wc->masks.ct_state |= CS_VALID_MASK;
    }
    return flow->ct_state & CS_VALID_MASK;
}
957
/* Initializes packet metadata 'md' from the metadata-related fields of
 * 'flow': recirculation, datapath hash, tunnel, skb priority, packet mark,
 * input port, and connection-tracking state, including the conntrack
 * original-direction tuple when 'flow' carries a valid CT entry. */
static inline void
pkt_metadata_from_flow(struct pkt_metadata *md, const struct flow *flow)
{
    /* Update this function whenever struct flow changes. */
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 40);

    md->recirc_id = flow->recirc_id;
    md->dp_hash = flow->dp_hash;
    flow_tnl_copy__(&md->tunnel, &flow->tunnel);
    md->skb_priority = flow->skb_priority;
    md->pkt_mark = flow->pkt_mark;
    md->in_port = flow->in_port;
    md->ct_state = flow->ct_state;
    md->ct_zone = flow->ct_zone;
    md->ct_mark = flow->ct_mark;
    md->ct_label = flow->ct_label;

    /* The CT original-direction tuple is only meaningful for IPv4/IPv6 flows
     * with a valid conntrack entry; default to the IPv4 variant. */
    md->ct_orig_tuple_ipv6 = false;
    if (flow->dl_type && is_ct_valid(flow, NULL, NULL)) {
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            md->ct_orig_tuple.ipv4 = (struct ovs_key_ct_tuple_ipv4) {
                flow->ct_nw_src,
                flow->ct_nw_dst,
                flow->ct_tp_src,
                flow->ct_tp_dst,
                flow->ct_nw_proto,
            };
        } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            md->ct_orig_tuple_ipv6 = true;
            md->ct_orig_tuple.ipv6 = (struct ovs_key_ct_tuple_ipv6) {
                flow->ct_ipv6_src,
                flow->ct_ipv6_dst,
                flow->ct_tp_src,
                flow->ct_tp_dst,
                flow->ct_nw_proto,
            };
        } else {
            /* Reset ct_orig_tuple for other types. */
            memset(&md->ct_orig_tuple, 0, sizeof md->ct_orig_tuple);
        }
    } else {
        /* No dl_type or no valid CT entry: leave the tuple zeroed so stale
         * data cannot leak into the new metadata. */
        memset(&md->ct_orig_tuple, 0, sizeof md->ct_orig_tuple);
    }
}
1002
/* Often, during translation we need to read a value from a flow('FLOW') and
 * unwildcard the corresponding bits in the wildcards('WC'). This macro makes
 * it easier to do that.
 *
 * 'WC' may be NULL, in which case only the field value is produced.
 * NOTE: 'WC' is evaluated twice, so it must be free of side effects. */

#define FLOW_WC_GET_AND_MASK_WC(FLOW, WC, FIELD) \
    (((WC) ? WC_MASK_FIELD(WC, FIELD) : NULL), ((FLOW)->FIELD))
1009
1010 static inline bool is_ethernet(const struct flow *flow,
1011 struct flow_wildcards *wc)
1012 {
1013 if (wc) {
1014 WC_MASK_FIELD(wc, packet_type);
1015 }
1016 return flow->packet_type == htonl(PT_ETH);
1017 }
1018
1019 static inline ovs_be16 get_dl_type(const struct flow *flow)
1020 {
1021 if (flow->packet_type == htonl(PT_ETH)) {
1022 return flow->dl_type;
1023 } else if (pt_ns(flow->packet_type) == OFPHTN_ETHERTYPE) {
1024 return pt_ns_type_be(flow->packet_type);
1025 } else {
1026 return htons(FLOW_DL_TYPE_NONE);
1027 }
1028 }
1029
1030 static inline bool is_vlan(const struct flow *flow,
1031 struct flow_wildcards *wc)
1032 {
1033 if (!is_ethernet(flow, wc)) {
1034 return false;
1035 }
1036 if (wc) {
1037 WC_MASK_FIELD_MASK(wc, vlans[0].tci, htons(VLAN_CFI));
1038 }
1039 return (flow->vlans[0].tci & htons(VLAN_CFI)) != 0;
1040 }
1041
1042 static inline bool is_ip_any(const struct flow *flow)
1043 {
1044 return dl_type_is_ip_any(get_dl_type(flow));
1045 }
1046
1047 static inline bool is_ip_proto(const struct flow *flow, uint8_t ip_proto,
1048 struct flow_wildcards *wc)
1049 {
1050 if (is_ip_any(flow)) {
1051 if (wc) {
1052 WC_MASK_FIELD(wc, nw_proto);
1053 }
1054 return flow->nw_proto == ip_proto;
1055 }
1056 return false;
1057 }
1058
/* Returns true if 'flow' is a TCP packet. */
static inline bool is_tcp(const struct flow *flow,
                          struct flow_wildcards *wc)
{
    const uint8_t proto = IPPROTO_TCP;

    return is_ip_proto(flow, proto, wc);
}
1064
/* Returns true if 'flow' is a UDP packet. */
static inline bool is_udp(const struct flow *flow,
                          struct flow_wildcards *wc)
{
    const uint8_t proto = IPPROTO_UDP;

    return is_ip_proto(flow, proto, wc);
}
1070
/* Returns true if 'flow' is an SCTP packet. */
static inline bool is_sctp(const struct flow *flow,
                           struct flow_wildcards *wc)
{
    const uint8_t proto = IPPROTO_SCTP;

    return is_ip_proto(flow, proto, wc);
}
1076
1077 static inline bool is_icmpv4(const struct flow *flow,
1078 struct flow_wildcards *wc)
1079 {
1080 if (get_dl_type(flow) == htons(ETH_TYPE_IP)) {
1081 if (wc) {
1082 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
1083 }
1084 return flow->nw_proto == IPPROTO_ICMP;
1085 }
1086 return false;
1087 }
1088
1089 static inline bool is_icmpv6(const struct flow *flow,
1090 struct flow_wildcards *wc)
1091 {
1092 if (get_dl_type(flow) == htons(ETH_TYPE_IPV6)) {
1093 if (wc) {
1094 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
1095 }
1096 return flow->nw_proto == IPPROTO_ICMPV6;
1097 }
1098 return false;
1099 }
1100
1101 static inline bool is_nd(const struct flow *flow,
1102 struct flow_wildcards *wc)
1103 {
1104 if (is_icmpv6(flow, wc)) {
1105 if (wc) {
1106 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
1107 }
1108 if (flow->tp_dst != htons(0)) {
1109 return false;
1110 }
1111
1112 if (wc) {
1113 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
1114 }
1115 return (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
1116 flow->tp_src == htons(ND_NEIGHBOR_ADVERT));
1117 }
1118 return false;
1119 }
1120
1121 static inline bool is_arp(const struct flow *flow)
1122 {
1123 return (flow->dl_type == htons(ETH_TYPE_ARP));
1124 }
1125
1126 static inline bool is_garp(const struct flow *flow,
1127 struct flow_wildcards *wc)
1128 {
1129 if (is_arp(flow)) {
1130 return (FLOW_WC_GET_AND_MASK_WC(flow, wc, nw_src) ==
1131 FLOW_WC_GET_AND_MASK_WC(flow, wc, nw_dst));
1132 }
1133
1134 return false;
1135 }
1136
1137 static inline bool is_igmp(const struct flow *flow, struct flow_wildcards *wc)
1138 {
1139 if (get_dl_type(flow) == htons(ETH_TYPE_IP)) {
1140 if (wc) {
1141 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
1142 }
1143 return flow->nw_proto == IPPROTO_IGMP;
1144 }
1145 return false;
1146 }
1147
1148 static inline bool is_mld(const struct flow *flow,
1149 struct flow_wildcards *wc)
1150 {
1151 if (is_icmpv6(flow, wc)) {
1152 if (wc) {
1153 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
1154 }
1155 return (flow->tp_src == htons(MLD_QUERY)
1156 || flow->tp_src == htons(MLD_REPORT)
1157 || flow->tp_src == htons(MLD_DONE)
1158 || flow->tp_src == htons(MLD2_REPORT));
1159 }
1160 return false;
1161 }
1162
1163 static inline bool is_mld_query(const struct flow *flow,
1164 struct flow_wildcards *wc)
1165 {
1166 if (is_icmpv6(flow, wc)) {
1167 if (wc) {
1168 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
1169 }
1170 return flow->tp_src == htons(MLD_QUERY);
1171 }
1172 return false;
1173 }
1174
/* Returns true if 'flow' is an MLD report or done message (i.e. MLD but not
 * a query), unwildcarding the fields consulted in 'wc' (when nonnull). */
static inline bool is_mld_report(const struct flow *flow,
                                 struct flow_wildcards *wc)
{
    if (!is_mld(flow, wc)) {
        return false;
    }
    return !is_mld_query(flow, wc);
}
1180
1181 static inline bool is_stp(const struct flow *flow)
1182 {
1183 return (flow->dl_type == htons(FLOW_DL_TYPE_NONE)
1184 && eth_addr_equals(flow->dl_dst, eth_addr_stp));
1185 }
1186
1187 #endif /* flow.h */