/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FLOW_H
#define FLOW_H 1

#include <sys/types.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include "bitmap.h"
#include "byte-order.h"
#include "openvswitch/compiler.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "openvswitch/flow.h"
#include "packets.h"
#include "hash.h"
#include "util.h"

struct dpif_flow_stats;
struct ds;
struct flow_wildcards;
struct minimask;
struct dp_packet;
struct pkt_metadata;
struct match;
/* Some flow fields are mutually exclusive or only appear within the flow
 * pipeline.  IPv6 headers are bigger than IPv4 and MPLS, and IPv6 ND packets
 * are bigger than TCP, UDP, and IGMP packets. */
#define FLOW_MAX_PACKET_U64S (FLOW_U64S                                   \
    /* Unused in datapath */ - FLOW_U64_SIZE(regs)                        \
                             - FLOW_U64_SIZE(metadata)                    \
    /* L2.5/3 */             - FLOW_U64_SIZE(nw_src) /* incl. nw_dst */   \
                             - FLOW_U64_SIZE(mpls_lse)                    \
    /* L4 */                 - FLOW_U64_SIZE(tp_src)                      \
    )

extern const uint8_t flow_segment_u64s[];

#define FLOW_U64_OFFSET(FIELD)                          \
    (offsetof(struct flow, FIELD) / sizeof(uint64_t))
#define FLOW_U64_OFFREM(FIELD)                          \
    (offsetof(struct flow, FIELD) % sizeof(uint64_t))

/* Number of 64-bit units spanned by a 'FIELD'. */
#define FLOW_U64_SIZE(FIELD)                            \
    DIV_ROUND_UP(FLOW_U64_OFFREM(FIELD) + MEMBER_SIZEOF(struct flow, FIELD), \
                 sizeof(uint64_t))
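
/* For example, FLOW_U64_OFFSET(nw_src) is the index of the 64-bit unit of
 * struct flow that holds 'nw_src', and FLOW_U64_SIZE(nw_src) is the number of
 * 64-bit units that 'nw_src' spans (normally 1; larger only if a field
 * straddles a unit boundary). */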

void flow_extract(struct dp_packet *, struct flow *);

void flow_zero_wildcards(struct flow *, const struct flow_wildcards *);
void flow_unwildcard_tp_ports(const struct flow *, struct flow_wildcards *);
void flow_get_metadata(const struct flow *, struct match *flow_metadata);

const char *ct_state_to_string(uint32_t state);
char *flow_to_string(const struct flow *);
void format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
                  uint32_t flags, char del);
void format_flags_masked(struct ds *ds, const char *name,
                         const char *(*bit_to_string)(uint32_t),
                         uint32_t flags, uint32_t mask, uint32_t max_mask);
int parse_flags(const char *s, const char *(*bit_to_string)(uint32_t),
                char end, const char *field_name, char **res_string,
                uint32_t *res_flags, uint32_t allowed, uint32_t *res_mask);

void flow_format(struct ds *, const struct flow *);
void flow_print(FILE *, const struct flow *);
static inline int flow_compare_3way(const struct flow *, const struct flow *);
static inline bool flow_equal(const struct flow *, const struct flow *);
static inline size_t flow_hash(const struct flow *, uint32_t basis);

void flow_set_dl_vlan(struct flow *, ovs_be16 vid);
void flow_set_vlan_vid(struct flow *, ovs_be16 vid);
void flow_set_vlan_pcp(struct flow *, uint8_t pcp);

int flow_count_mpls_labels(const struct flow *, struct flow_wildcards *);
int flow_count_common_mpls_labels(const struct flow *a, int an,
                                  const struct flow *b, int bn,
                                  struct flow_wildcards *wc);
void flow_push_mpls(struct flow *, int n, ovs_be16 mpls_eth_type,
                    struct flow_wildcards *);
bool flow_pop_mpls(struct flow *, int n, ovs_be16 eth_type,
                   struct flow_wildcards *);
void flow_set_mpls_label(struct flow *, int idx, ovs_be32 label);
void flow_set_mpls_ttl(struct flow *, int idx, uint8_t ttl);
void flow_set_mpls_tc(struct flow *, int idx, uint8_t tc);
void flow_set_mpls_bos(struct flow *, int idx, uint8_t stack);
void flow_set_mpls_lse(struct flow *, int idx, ovs_be32 lse);

void flow_compose(struct dp_packet *, const struct flow *);

static inline uint64_t
flow_get_xreg(const struct flow *flow, int idx)
{
    return ((uint64_t) flow->regs[idx * 2] << 32) | flow->regs[idx * 2 + 1];
}

static inline void
flow_set_xreg(struct flow *flow, int idx, uint64_t value)
{
    flow->regs[idx * 2] = value >> 32;
    flow->regs[idx * 2 + 1] = value;
}
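
/* For instance, flow_set_xreg(flow, 0, x) stores the high 32 bits of 'x' in
 * regs[0] and the low 32 bits in regs[1]; flow_get_xreg(flow, 0) reassembles
 * them. */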

static inline int
flow_compare_3way(const struct flow *a, const struct flow *b)
{
    return memcmp(a, b, sizeof *a);
}

static inline bool
flow_equal(const struct flow *a, const struct flow *b)
{
    return !flow_compare_3way(a, b);
}

static inline size_t
flow_hash(const struct flow *flow, uint32_t basis)
{
    return hash_bytes64((const uint64_t *)flow, sizeof *flow, basis);
}

static inline uint16_t
ofp_to_u16(ofp_port_t ofp_port)
{
    return (OVS_FORCE uint16_t) ofp_port;
}

static inline uint32_t
odp_to_u32(odp_port_t odp_port)
{
    return (OVS_FORCE uint32_t) odp_port;
}

static inline uint32_t
ofp11_to_u32(ofp11_port_t ofp11_port)
{
    return (OVS_FORCE uint32_t) ofp11_port;
}

static inline ofp_port_t
u16_to_ofp(uint16_t port)
{
    return OFP_PORT_C(port);
}

static inline odp_port_t
u32_to_odp(uint32_t port)
{
    return ODP_PORT_C(port);
}

static inline ofp11_port_t
u32_to_ofp11(uint32_t port)
{
    return OFP11_PORT_C(port);
}

static inline uint32_t
hash_ofp_port(ofp_port_t ofp_port)
{
    return hash_int(ofp_to_u16(ofp_port), 0);
}

static inline uint32_t
hash_odp_port(odp_port_t odp_port)
{
    return hash_int(odp_to_u32(odp_port), 0);
}
\f
uint32_t flow_hash_5tuple(const struct flow *flow, uint32_t basis);
uint32_t flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis);
uint32_t flow_hash_symmetric_l3l4(const struct flow *flow, uint32_t basis,
                                  bool inc_udp_ports);

/* Initialize a flow with random fields that matter for nx_hash_fields. */
void flow_random_hash_fields(struct flow *);
void flow_mask_hash_fields(const struct flow *, struct flow_wildcards *,
                           enum nx_hash_fields);
uint32_t flow_hash_fields(const struct flow *, enum nx_hash_fields,
                          uint16_t basis);
const char *flow_hash_fields_to_str(enum nx_hash_fields);
bool flow_hash_fields_valid(enum nx_hash_fields);

uint32_t flow_hash_in_wildcards(const struct flow *,
                                const struct flow_wildcards *,
                                uint32_t basis);

bool flow_equal_except(const struct flow *a, const struct flow *b,
                       const struct flow_wildcards *);
\f
/* Bitmap for flow values.  For each 1-bit the corresponding flow value is
 * explicitly specified, other values are zeroes.
 *
 * map_t must be wide enough to hold any member of struct flow. */
typedef unsigned long long map_t;
#define MAP_T_BITS (sizeof(map_t) * CHAR_BIT)
#define MAP_1 (map_t)1
#define MAP_MAX TYPE_MAXIMUM(map_t)

#define MAP_IS_SET(MAP, IDX) ((MAP) & (MAP_1 << (IDX)))

/* Iterate through the indices of all 1-bits in 'MAP'. */
#define MAP_FOR_EACH_INDEX(IDX, MAP)            \
    ULLONG_FOR_EACH_1(IDX, MAP)

#define FLOWMAP_UNITS DIV_ROUND_UP(FLOW_U64S, MAP_T_BITS)

struct flowmap {
    map_t bits[FLOWMAP_UNITS];
};

#define FLOWMAP_EMPTY_INITIALIZER { { 0 } }

static inline void flowmap_init(struct flowmap *);
static inline bool flowmap_equal(struct flowmap, struct flowmap);
static inline bool flowmap_is_set(const struct flowmap *, size_t idx);
static inline bool flowmap_are_set(const struct flowmap *, size_t idx,
                                   unsigned int n_bits);
static inline void flowmap_set(struct flowmap *, size_t idx,
                               unsigned int n_bits);
static inline void flowmap_clear(struct flowmap *, size_t idx,
                                 unsigned int n_bits);
static inline struct flowmap flowmap_or(struct flowmap, struct flowmap);
static inline struct flowmap flowmap_and(struct flowmap, struct flowmap);
static inline bool flowmap_is_empty(struct flowmap);
static inline unsigned int flowmap_n_1bits(struct flowmap);

#define FLOWMAP_HAS_FIELD(FM, FIELD) \
    flowmap_are_set(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))

#define FLOWMAP_SET(FM, FIELD) \
    flowmap_set(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))

#define FLOWMAP_SET__(FM, FIELD, SIZE)                  \
    flowmap_set(FM, FLOW_U64_OFFSET(FIELD),             \
                DIV_ROUND_UP(SIZE, sizeof(uint64_t)))

/* XXX: Only works for full 64-bit units. */
#define FLOWMAP_CLEAR(FM, FIELD)                                        \
    BUILD_ASSERT_DECL(FLOW_U64_OFFREM(FIELD) == 0);                     \
    BUILD_ASSERT_DECL(sizeof(((struct flow *)0)->FIELD) % sizeof(uint64_t) == 0); \
    flowmap_clear(FM, FLOW_U64_OFFSET(FIELD), FLOW_U64_SIZE(FIELD))

/* Iterate through all units in 'FMAP'. */
#define FLOWMAP_FOR_EACH_UNIT(UNIT)                     \
    for ((UNIT) = 0; (UNIT) < FLOWMAP_UNITS; (UNIT)++)

/* Iterate through all map units in 'FMAP'. */
#define FLOWMAP_FOR_EACH_MAP(MAP, FLOWMAP)                              \
    for (size_t unit__ = 0;                                             \
         unit__ < FLOWMAP_UNITS && ((MAP) = (FLOWMAP).bits[unit__], true); \
         unit__++)

struct flowmap_aux;
static inline bool flowmap_next_index(struct flowmap_aux *, size_t *idx);

#define FLOWMAP_AUX_INITIALIZER(FLOWMAP) { .unit = 0, .map = (FLOWMAP) }

/* Iterate through all struct flow u64 indices specified by 'MAP'.  This is a
 * slower but easier version of the FLOWMAP_FOR_EACH_MAP() &
 * MAP_FOR_EACH_INDEX() combination. */
#define FLOWMAP_FOR_EACH_INDEX(IDX, MAP)                          \
    for (struct flowmap_aux aux__ = FLOWMAP_AUX_INITIALIZER(MAP); \
         flowmap_next_index(&aux__, &(IDX));)
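
/* Illustrative use of the flowmap helpers (a sketch; 'fm' and 'idx' are
 * hypothetical locals, not part of this header):
 *
 *     struct flowmap fm = FLOWMAP_EMPTY_INITIALIZER;
 *     size_t idx;
 *
 *     FLOWMAP_SET(&fm, nw_src);
 *     FLOWMAP_FOR_EACH_INDEX (idx, fm) {
 *         ...each 'idx' is a 64-bit-unit offset into struct flow...
 *     }
 */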

/* Flowmap inline implementations. */
static inline void
flowmap_init(struct flowmap *fm)
{
    memset(fm, 0, sizeof *fm);
}

static inline bool
flowmap_equal(struct flowmap a, struct flowmap b)
{
    return !memcmp(&a, &b, sizeof a);
}

static inline bool
flowmap_is_set(const struct flowmap *fm, size_t idx)
{
    return (fm->bits[idx / MAP_T_BITS] & (MAP_1 << (idx % MAP_T_BITS))) != 0;
}

/* Returns 'true' if any of the 'n_bits' bits starting at 'idx' are set in
 * 'fm'.  'n_bits' can be at most MAP_T_BITS. */
static inline bool
flowmap_are_set(const struct flowmap *fm, size_t idx, unsigned int n_bits)
{
    map_t n_bits_mask = (MAP_1 << n_bits) - 1;
    size_t unit = idx / MAP_T_BITS;

    idx %= MAP_T_BITS;

    if (fm->bits[unit] & (n_bits_mask << idx)) {
        return true;
    }
    /* The seemingly unnecessary bounds check on 'unit' is a workaround for a
     * false-positive array out of bounds error by GCC 4.9. */
    if (unit + 1 < FLOWMAP_UNITS && idx + n_bits > MAP_T_BITS) {
        /* Check the remaining bits from the next unit. */
        return fm->bits[unit + 1] & (n_bits_mask >> (MAP_T_BITS - idx));
    }
    return false;
}

/* Set the 'n_bits' consecutive bits in 'fm', starting at bit 'idx'.
 * 'n_bits' can be at most MAP_T_BITS. */
static inline void
flowmap_set(struct flowmap *fm, size_t idx, unsigned int n_bits)
{
    map_t n_bits_mask = (MAP_1 << n_bits) - 1;
    size_t unit = idx / MAP_T_BITS;

    idx %= MAP_T_BITS;

    fm->bits[unit] |= n_bits_mask << idx;
    /* The seemingly unnecessary bounds check on 'unit' is a workaround for a
     * false-positive array out of bounds error by GCC 4.9. */
    if (unit + 1 < FLOWMAP_UNITS && idx + n_bits > MAP_T_BITS) {
        /* 'MAP_T_BITS - idx' bits were set on 'unit', set the remaining
         * bits from the next unit. */
        fm->bits[unit + 1] |= n_bits_mask >> (MAP_T_BITS - idx);
    }
}

/* Clears the 'n_bits' consecutive bits in 'fm', starting at bit 'idx'.
 * 'n_bits' can be at most MAP_T_BITS. */
static inline void
flowmap_clear(struct flowmap *fm, size_t idx, unsigned int n_bits)
{
    map_t n_bits_mask = (MAP_1 << n_bits) - 1;
    size_t unit = idx / MAP_T_BITS;

    idx %= MAP_T_BITS;

    fm->bits[unit] &= ~(n_bits_mask << idx);
    /* The seemingly unnecessary bounds check on 'unit' is a workaround for a
     * false-positive array out of bounds error by GCC 4.9. */
    if (unit + 1 < FLOWMAP_UNITS && idx + n_bits > MAP_T_BITS) {
        /* 'MAP_T_BITS - idx' bits were cleared on 'unit', clear the
         * remaining bits from the next unit. */
        fm->bits[unit + 1] &= ~(n_bits_mask >> (MAP_T_BITS - idx));
    }
}

/* OR the bits in the flowmaps. */
static inline struct flowmap
flowmap_or(struct flowmap a, struct flowmap b)
{
    struct flowmap map;
    size_t unit;

    FLOWMAP_FOR_EACH_UNIT (unit) {
        map.bits[unit] = a.bits[unit] | b.bits[unit];
    }
    return map;
}

/* AND the bits in the flowmaps. */
static inline struct flowmap
flowmap_and(struct flowmap a, struct flowmap b)
{
    struct flowmap map;
    size_t unit;

    FLOWMAP_FOR_EACH_UNIT (unit) {
        map.bits[unit] = a.bits[unit] & b.bits[unit];
    }
    return map;
}

static inline bool
flowmap_is_empty(struct flowmap fm)
{
    map_t map;

    FLOWMAP_FOR_EACH_MAP (map, fm) {
        if (map) {
            return false;
        }
    }
    return true;
}

static inline unsigned int
flowmap_n_1bits(struct flowmap fm)
{
    unsigned int n_1bits = 0;
    size_t unit;

    FLOWMAP_FOR_EACH_UNIT (unit) {
        n_1bits += count_1bits(fm.bits[unit]);
    }
    return n_1bits;
}

struct flowmap_aux {
    size_t unit;
    struct flowmap map;
};

static inline bool
flowmap_next_index(struct flowmap_aux *aux, size_t *idx)
{
    for (;;) {
        map_t *map = &aux->map.bits[aux->unit];
        if (*map) {
            *idx = aux->unit * MAP_T_BITS + raw_ctz(*map);
            *map = zero_rightmost_1bit(*map);
            return true;
        }
        if (++aux->unit >= FLOWMAP_UNITS) {
            return false;
        }
    }
}

\f
/* Compressed flow. */

/* A sparse representation of a "struct flow".
 *
 * A "struct flow" is fairly large and tends to be mostly zeros.  Sparse
 * representation has two advantages.  First, it saves memory and, more
 * importantly, minimizes the number of accessed cache lines.  Second, it
 * saves time when the goal is to iterate over only the nonzero parts of the
 * struct.
 *
 * The map member holds one bit for each uint64_t in a "struct flow".  Each
 * 0-bit indicates that the corresponding uint64_t is zero, each 1-bit that it
 * *may* be nonzero (see below how this applies to minimasks).
 *
 * The values indicated by 'map' always follow the miniflow in memory.  The
 * user of the miniflow is responsible for always having enough storage after
 * the struct miniflow corresponding to the number of 1-bits in maps.
 *
 * Elements in the values array are allowed to be zero.  This is useful for
 * "struct minimatch", for which ensuring that the miniflow and minimask
 * members have the same maps allows optimization.  This allowance applies
 * only to a miniflow that is not a mask.  That is, a minimask may NOT have
 * zero elements in its values.
 *
 * A miniflow is always dynamically allocated so that the maps are followed by
 * at least as many elements as there are 1-bits in maps. */
struct miniflow {
    struct flowmap map;
    /* Followed by:
     *     uint64_t values[n];
     * where 'n' is miniflow_n_values(miniflow). */
};
BUILD_ASSERT_DECL(sizeof(struct miniflow) % sizeof(uint64_t) == 0);

#define MINIFLOW_VALUES_SIZE(COUNT) ((COUNT) * sizeof(uint64_t))

static inline uint64_t *miniflow_values(struct miniflow *mf)
{
    return (uint64_t *)(mf + 1);
}

static inline const uint64_t *miniflow_get_values(const struct miniflow *mf)
{
    return (const uint64_t *)(mf + 1);
}

struct pkt_metadata;

/* 'dst' must be followed by buffer space for FLOW_U64S 64-bit units.
 * 'dst->map' is ignored on input and set on output to indicate which fields
 * were extracted. */
void miniflow_extract(struct dp_packet *packet, struct miniflow *dst);
void miniflow_map_init(struct miniflow *, const struct flow *);
void flow_wc_map(const struct flow *, struct flowmap *);
size_t miniflow_alloc(struct miniflow *dsts[], size_t n,
                      const struct miniflow *src);
void miniflow_init(struct miniflow *, const struct flow *);
void miniflow_clone(struct miniflow *, const struct miniflow *,
                    size_t n_values);
struct miniflow * miniflow_create(const struct flow *);

void miniflow_expand(const struct miniflow *, struct flow *);
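
/* A typical lifecycle, as a sketch (error handling omitted; 'flow' and
 * 'expanded' are hypothetical locals, and it is assumed here that a miniflow
 * from miniflow_create() is heap storage released with free()):
 *
 *     struct miniflow *mf = miniflow_create(&flow);
 *     struct flow expanded;
 *
 *     miniflow_expand(mf, &expanded);
 *     free(mf);
 */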

static inline uint64_t flow_u64_value(const struct flow *flow, size_t index)
{
    return ((uint64_t *)flow)[index];
}

static inline uint64_t *flow_u64_lvalue(struct flow *flow, size_t index)
{
    return &((uint64_t *)flow)[index];
}

static inline size_t
miniflow_n_values(const struct miniflow *flow)
{
    return flowmap_n_1bits(flow->map);
}

struct flow_for_each_in_maps_aux {
    const struct flow *flow;
    struct flowmap_aux map_aux;
};

static inline bool
flow_values_get_next_in_maps(struct flow_for_each_in_maps_aux *aux,
                             uint64_t *value)
{
    size_t idx;

    if (flowmap_next_index(&aux->map_aux, &idx)) {
        *value = flow_u64_value(aux->flow, idx);
        return true;
    }
    return false;
}

/* Iterate through all flow u64 values specified by 'MAPS'. */
#define FLOW_FOR_EACH_IN_MAPS(VALUE, FLOW, MAPS)            \
    for (struct flow_for_each_in_maps_aux aux__             \
             = { (FLOW), FLOWMAP_AUX_INITIALIZER(MAPS) };   \
         flow_values_get_next_in_maps(&aux__, &(VALUE));)
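
/* Example (a sketch; 'value', 'flow', and 'maps' are hypothetical locals):
 *
 *     uint64_t value;
 *
 *     FLOW_FOR_EACH_IN_MAPS (value, &flow, maps) {
 *         ...'value' is each u64 of 'flow' whose bit is set in 'maps'...
 *     }
 */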

struct mf_for_each_in_map_aux {
    size_t unit;
    struct flowmap fmap;
    struct flowmap map;
    const uint64_t *values;
};

static inline bool
mf_get_next_in_map(struct mf_for_each_in_map_aux *aux,
                   uint64_t *value)
{
    map_t *map, *fmap;
    map_t rm1bit;

    while (OVS_UNLIKELY(!*(map = &aux->map.bits[aux->unit]))) {
        /* Skip remaining data in the previous unit. */
        aux->values += count_1bits(aux->fmap.bits[aux->unit]);
        if (++aux->unit == FLOWMAP_UNITS) {
            return false;
        }
    }

    rm1bit = rightmost_1bit(*map);
    *map -= rm1bit;
    fmap = &aux->fmap.bits[aux->unit];

    if (OVS_LIKELY(*fmap & rm1bit)) {
        map_t trash = *fmap & (rm1bit - 1);

        *fmap -= trash;
        /* count_1bits() is fast for systems where speed matters (e.g.,
         * DPDK), so we don't try to avoid using it.
         * Advance 'aux->values' to point to the value for 'rm1bit'. */
        aux->values += count_1bits(trash);

        *value = *aux->values;
    } else {
        *value = 0;
    }
    return true;
}

/* Iterate through miniflow u64 values specified by 'FLOWMAP'. */
#define MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, FLOW, FLOWMAP)          \
    for (struct mf_for_each_in_map_aux aux__ =                      \
        { 0, (FLOW)->map, (FLOWMAP), miniflow_get_values(FLOW) };   \
         mf_get_next_in_map(&aux__, &(VALUE));)

/* This can be used when it is known that 'idx' is set in 'map'. */
static inline const uint64_t *
miniflow_values_get__(const uint64_t *values, map_t map, size_t idx)
{
    return values + count_1bits(map & ((MAP_1 << idx) - 1));
}

/* This can be used when it is known that 'u64_idx' is set in
 * the map of 'mf'. */
static inline const uint64_t *
miniflow_get__(const struct miniflow *mf, size_t idx)
{
    const uint64_t *values = miniflow_get_values(mf);
    const map_t *map = mf->map.bits;

    while (idx >= MAP_T_BITS) {
        idx -= MAP_T_BITS;
        values += count_1bits(*map++);
    }
    return miniflow_values_get__(values, *map, idx);
}

#define MINIFLOW_IN_MAP(MF, IDX) flowmap_is_set(&(MF)->map, IDX)

/* Get the value of the struct flow 'FIELD' as up to 8 byte wide integer type
 * 'TYPE' from miniflow 'MF'. */
#define MINIFLOW_GET_TYPE(MF, TYPE, FIELD)                                  \
    (MINIFLOW_IN_MAP(MF, FLOW_U64_OFFSET(FIELD))                            \
     ? ((OVS_FORCE const TYPE *)miniflow_get__(MF, FLOW_U64_OFFSET(FIELD))) \
       [FLOW_U64_OFFREM(FIELD) / sizeof(TYPE)]                              \
     : 0)

#define MINIFLOW_GET_U128(FLOW, FIELD)                                  \
    (ovs_u128) { .u64 = {                                               \
            (MINIFLOW_IN_MAP(FLOW, FLOW_U64_OFFSET(FIELD)) ?            \
             *miniflow_get__(FLOW, FLOW_U64_OFFSET(FIELD)) : 0),        \
            (MINIFLOW_IN_MAP(FLOW, FLOW_U64_OFFSET(FIELD) + 1) ?        \
             *miniflow_get__(FLOW, FLOW_U64_OFFSET(FIELD) + 1) : 0) } }

#define MINIFLOW_GET_U8(FLOW, FIELD)            \
    MINIFLOW_GET_TYPE(FLOW, uint8_t, FIELD)
#define MINIFLOW_GET_U16(FLOW, FIELD)           \
    MINIFLOW_GET_TYPE(FLOW, uint16_t, FIELD)
#define MINIFLOW_GET_BE16(FLOW, FIELD)          \
    MINIFLOW_GET_TYPE(FLOW, ovs_be16, FIELD)
#define MINIFLOW_GET_U32(FLOW, FIELD)           \
    MINIFLOW_GET_TYPE(FLOW, uint32_t, FIELD)
#define MINIFLOW_GET_BE32(FLOW, FIELD)          \
    MINIFLOW_GET_TYPE(FLOW, ovs_be32, FIELD)
#define MINIFLOW_GET_U64(FLOW, FIELD)           \
    MINIFLOW_GET_TYPE(FLOW, uint64_t, FIELD)
#define MINIFLOW_GET_BE64(FLOW, FIELD)          \
    MINIFLOW_GET_TYPE(FLOW, ovs_be64, FIELD)
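
/* Example (a sketch; 'mf' is a hypothetical "const struct miniflow *"):
 *
 *     ovs_be16 dl_type = MINIFLOW_GET_BE16(mf, dl_type);
 *     uint8_t nw_proto = MINIFLOW_GET_U8(mf, nw_proto);
 *
 * Each of these evaluates to 0 when the u64 unit holding the field is not
 * present in 'mf''s map. */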

static inline uint64_t miniflow_get(const struct miniflow *,
                                    unsigned int u64_ofs);
static inline uint32_t miniflow_get_u32(const struct miniflow *,
                                        unsigned int u32_ofs);
static inline ovs_be32 miniflow_get_be32(const struct miniflow *,
                                         unsigned int be32_ofs);
static inline uint16_t miniflow_get_vid(const struct miniflow *);
static inline uint16_t miniflow_get_tcp_flags(const struct miniflow *);
static inline ovs_be64 miniflow_get_metadata(const struct miniflow *);

bool miniflow_equal(const struct miniflow *a, const struct miniflow *b);
bool miniflow_equal_in_minimask(const struct miniflow *a,
                                const struct miniflow *b,
                                const struct minimask *);
bool miniflow_equal_flow_in_minimask(const struct miniflow *a,
                                     const struct flow *b,
                                     const struct minimask *);
uint32_t miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis);

\f
/* Compressed flow wildcards. */

/* A sparse representation of a "struct flow_wildcards".
 *
 * See the large comment on struct miniflow for details.
 *
 * Note: While miniflow can have zero data for a 1-bit in the map,
 * a minimask may not!  We rely on this in the implementation. */
struct minimask {
    struct miniflow masks;
};

void minimask_init(struct minimask *, const struct flow_wildcards *);
struct minimask * minimask_create(const struct flow_wildcards *);
void minimask_combine(struct minimask *dst,
                      const struct minimask *a, const struct minimask *b,
                      uint64_t storage[FLOW_U64S]);

void minimask_expand(const struct minimask *, struct flow_wildcards *);

static inline uint32_t minimask_get_u32(const struct minimask *,
                                        unsigned int u32_ofs);
static inline ovs_be32 minimask_get_be32(const struct minimask *,
                                         unsigned int be32_ofs);
static inline uint16_t minimask_get_vid_mask(const struct minimask *);
static inline ovs_be64 minimask_get_metadata_mask(const struct minimask *);

bool minimask_equal(const struct minimask *a, const struct minimask *b);
bool minimask_has_extra(const struct minimask *, const struct minimask *);

\f
/* Returns true if 'mask' matches every packet, false if 'mask' fixes any bits
 * or fields. */
static inline bool
minimask_is_catchall(const struct minimask *mask)
{
    /* For every 1-bit in mask's map, the corresponding value is non-zero,
     * so the only way the mask cannot fix any bits or fields is for the
     * map to be zero. */
    return flowmap_is_empty(mask->masks.map);
}

/* Returns the uint64_t that would be at byte offset '8 * u64_ofs' if 'flow'
 * were expanded into a "struct flow". */
static inline uint64_t miniflow_get(const struct miniflow *flow,
                                    unsigned int u64_ofs)
{
    return MINIFLOW_IN_MAP(flow, u64_ofs) ? *miniflow_get__(flow, u64_ofs) : 0;
}

static inline uint32_t miniflow_get_u32(const struct miniflow *flow,
                                        unsigned int u32_ofs)
{
    uint64_t value = miniflow_get(flow, u32_ofs / 2);

#if WORDS_BIGENDIAN
    return (u32_ofs & 1) ? value : value >> 32;
#else
    return (u32_ofs & 1) ? value >> 32 : value;
#endif
}

static inline ovs_be32 miniflow_get_be32(const struct miniflow *flow,
                                         unsigned int be32_ofs)
{
    return (OVS_FORCE ovs_be32)miniflow_get_u32(flow, be32_ofs);
}

/* Returns the VID within the vlan_tci member of the "struct flow" represented
 * by 'flow'. */
static inline uint16_t
miniflow_get_vid(const struct miniflow *flow)
{
    ovs_be16 tci = MINIFLOW_GET_BE16(flow, vlan_tci);
    return vlan_tci_to_vid(tci);
}

/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'mask'
 * were expanded into a "struct flow_wildcards". */
static inline uint32_t
minimask_get_u32(const struct minimask *mask, unsigned int u32_ofs)
{
    return miniflow_get_u32(&mask->masks, u32_ofs);
}

static inline ovs_be32
minimask_get_be32(const struct minimask *mask, unsigned int be32_ofs)
{
    return (OVS_FORCE ovs_be32)minimask_get_u32(mask, be32_ofs);
}

/* Returns the VID mask within the vlan_tci member of the "struct
 * flow_wildcards" represented by 'mask'. */
static inline uint16_t
minimask_get_vid_mask(const struct minimask *mask)
{
    return miniflow_get_vid(&mask->masks);
}

/* Returns the value of the "tcp_flags" field in 'flow'. */
static inline uint16_t
miniflow_get_tcp_flags(const struct miniflow *flow)
{
    return ntohs(MINIFLOW_GET_BE16(flow, tcp_flags));
}

/* Returns the value of the OpenFlow 1.1+ "metadata" field in 'flow'. */
static inline ovs_be64
miniflow_get_metadata(const struct miniflow *flow)
{
    return MINIFLOW_GET_BE64(flow, metadata);
}

/* Returns the mask for the OpenFlow 1.1+ "metadata" field in 'mask'.
 *
 * The return value is all-1-bits if 'mask' matches on the whole value of the
 * metadata field, all-0-bits if 'mask' entirely wildcards the metadata field,
 * or some other value if the metadata field is partially matched, partially
 * wildcarded. */
static inline ovs_be64
minimask_get_metadata_mask(const struct minimask *mask)
{
    return MINIFLOW_GET_BE64(&mask->masks, metadata);
}

/* Perform a bitwise OR of miniflow 'src' flow data specified in 'subset' with
 * the equivalent fields in 'dst', storing the result in 'dst'.  'subset' must
 * be a subset of 'src's map. */
static inline void
flow_union_with_miniflow_subset(struct flow *dst, const struct miniflow *src,
                                struct flowmap subset)
{
    uint64_t *dst_u64 = (uint64_t *) dst;
    const uint64_t *p = miniflow_get_values(src);
    map_t map;

    FLOWMAP_FOR_EACH_MAP (map, subset) {
        size_t idx;

        MAP_FOR_EACH_INDEX(idx, map) {
            dst_u64[idx] |= *p++;
        }
        dst_u64 += MAP_T_BITS;
    }
}

/* Perform a bitwise OR of miniflow 'src' flow data with the equivalent
 * fields in 'dst', storing the result in 'dst'. */
static inline void
flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
{
    flow_union_with_miniflow_subset(dst, src, src->map);
}

static inline void
pkt_metadata_from_flow(struct pkt_metadata *md, const struct flow *flow)
{
    md->recirc_id = flow->recirc_id;
    md->dp_hash = flow->dp_hash;
    flow_tnl_copy__(&md->tunnel, &flow->tunnel);
    md->skb_priority = flow->skb_priority;
    md->pkt_mark = flow->pkt_mark;
    md->in_port = flow->in_port;
    md->ct_state = flow->ct_state;
    md->ct_zone = flow->ct_zone;
    md->ct_mark = flow->ct_mark;
    md->ct_label = flow->ct_label;
}

static inline bool is_ip_any(const struct flow *flow)
{
    return dl_type_is_ip_any(flow->dl_type);
}

static inline bool is_icmpv4(const struct flow *flow,
                             struct flow_wildcards *wc)
{
    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        if (wc) {
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
        }
        return flow->nw_proto == IPPROTO_ICMP;
    }
    return false;
}

static inline bool is_icmpv6(const struct flow *flow,
                             struct flow_wildcards *wc)
{
    if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        if (wc) {
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
        }
        return flow->nw_proto == IPPROTO_ICMPV6;
    }
    return false;
}

static inline bool is_igmp(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        if (wc) {
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
        }
        return flow->nw_proto == IPPROTO_IGMP;
    }
    return false;
}

static inline bool is_mld(const struct flow *flow,
                          struct flow_wildcards *wc)
{
    if (is_icmpv6(flow, wc)) {
        if (wc) {
            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        }
        return (flow->tp_src == htons(MLD_QUERY)
                || flow->tp_src == htons(MLD_REPORT)
                || flow->tp_src == htons(MLD_DONE)
                || flow->tp_src == htons(MLD2_REPORT));
    }
    return false;
}

static inline bool is_mld_query(const struct flow *flow,
                                struct flow_wildcards *wc)
{
    if (is_icmpv6(flow, wc)) {
        if (wc) {
            memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        }
        return flow->tp_src == htons(MLD_QUERY);
    }
    return false;
}

static inline bool is_mld_report(const struct flow *flow,
                                 struct flow_wildcards *wc)
{
    return is_mld(flow, wc) && !is_mld_query(flow, wc);
}

static inline bool is_stp(const struct flow *flow)
{
    return (eth_addr_equals(flow->dl_dst, eth_addr_stp)
            && flow->dl_type == htons(FLOW_DL_TYPE_NONE));
}

#endif /* flow.h */