/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "classifier.h"
#include <errno.h>
#include <netinet/in.h>
#include "byte-order.h"
#include "dynamic-string.h"
#include "flow.h"
#include "hash.h"
#include "cmap.h"
#include "list.h"
#include "odp-util.h"
#include "ofp-util.h"
#include "packets.h"
#include "tag.h"
#include "util.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(classifier);

struct trie_ctx;

/* Ports trie depends on both ports sharing the same ovs_be32. */
#define TP_PORTS_OFS32 (offsetof(struct flow, tp_src) / 4)
BUILD_ASSERT_DECL(TP_PORTS_OFS32 == offsetof(struct flow, tp_dst) / 4);
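
/* Illustrative note (not from the original source): in "struct flow" the
 * 16-bit 'tp_src' and 'tp_dst' are laid out back to back, so they share a
 * single 32-bit word.  Assuming that layout, one ovs_be32 carries both
 * ports:
 *
 *     ovs_be32 ports;
 *     memcpy(&ports, &flow->tp_src, sizeof ports);
 *     ovs_be16 src = ((const ovs_be16 *) &ports)[0];   // == flow->tp_src
 *     ovs_be16 dst = ((const ovs_be16 *) &ports)[1];   // == flow->tp_dst
 *
 * This is why a single ports trie can cover both fields at once. */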

/* A set of rules that all have the same fields wildcarded. */
struct cls_subtable {
    /* These fields are only used by writers and iterators. */
    struct cmap_node cmap_node; /* Within struct classifier 'subtables_map'. */

    /* These fields are only used by writers. */
    int n_rules OVS_GUARDED;                /* Number of rules, including
                                             * duplicates. */
    unsigned int max_priority OVS_GUARDED;  /* Max priority of any rule in
                                             * the subtable. */
    unsigned int max_count OVS_GUARDED;     /* Count of max_priority rules. */

    /* These fields are accessed by readers who care about wildcarding. */
    tag_type tag;       /* Tag generated from mask for partitioning (const). */
    uint8_t n_indices;                   /* How many indices to use (const). */
    uint8_t index_ofs[CLS_MAX_INDICES];  /* u32 segment boundaries (const). */
    unsigned int trie_plen[CLS_MAX_TRIES];  /* Trie prefix length in 'mask'
                                             * (runtime configurable). */
    int ports_mask_len;                     /* (const) */
    struct cmap indices[CLS_MAX_INDICES];   /* Staged lookup indices. */
    rcu_trie_ptr ports_trie;                /* NULL if none. */

    /* These fields are accessed by all readers. */
    struct cmap rules;                      /* Contains "struct cls_rule"s. */
    struct minimask mask;                   /* Wildcards for fields (const). */
    /* 'mask' must be the last field. */
};

/* Associates a metadata value (that is, a value of the OpenFlow 1.1+ metadata
 * field) with tags for the "cls_subtable"s that contain rules that match that
 * metadata value. */
struct cls_partition {
    struct cmap_node cmap_node; /* In struct classifier's 'partitions' map. */
    ovs_be64 metadata;          /* metadata value for this partition. */
    tag_type tags;              /* OR of each flow's cls_subtable tag. */
    struct tag_tracker tracker OVS_GUARDED; /* Tracks the bits in 'tags'. */
};

/* Internal representation of a rule in a "struct cls_subtable". */
struct cls_match {
    /* Accessed only by writers and iterators. */
    struct list list OVS_GUARDED; /* List of identical, lower-priority rules. */

    /* Accessed only by writers. */
    struct cls_partition *partition OVS_GUARDED;

    /* Accessed by readers interested in wildcarding. */
    unsigned int priority;      /* Larger numbers are higher priorities. */
    struct cmap_node index_nodes[CLS_MAX_INDICES]; /* Within subtable's
                                                    * 'indices'. */
    /* Accessed by all readers. */
    struct cmap_node cmap_node; /* Within struct cls_subtable 'rules'. */
    struct cls_rule *cls_rule;
    struct miniflow flow;       /* Matching rule.  Mask is in the subtable. */
    /* 'flow' must be the last field. */
};

static struct cls_match *
cls_match_alloc(struct cls_rule *rule)
{
    int count = count_1bits(rule->match.flow.map);

    struct cls_match *cls_match
        = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
                  + MINIFLOW_VALUES_SIZE(count));

    cls_match->cls_rule = rule;
    miniflow_clone_inline(&cls_match->flow, &rule->match.flow, count);
    cls_match->priority = rule->priority;
    rule->cls_match = cls_match;

    return cls_match;
}
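
/* Illustrative note (not from the original source): 'struct cls_match' ends
 * in a variable-size miniflow, so cls_match_alloc() above trims the unused
 * tail of the inline value array.  The same pattern for a hypothetical
 * wrapper struct that embeds a miniflow as its last member:
 *
 *     int count = count_1bits(src->map);
 *     struct wrapper *w
 *         = xmalloc(sizeof *w - sizeof w->flow.inline_values
 *                   + MINIFLOW_VALUES_SIZE(count));
 *     miniflow_clone_inline(&w->flow, src, count);
 */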

static struct cls_subtable *find_subtable(const struct classifier *cls,
                                          const struct minimask *);
static struct cls_subtable *insert_subtable(struct classifier *cls,
                                            const struct minimask *)
    OVS_REQUIRES(cls->mutex);
static void destroy_subtable(struct classifier *cls, struct cls_subtable *)
    OVS_REQUIRES(cls->mutex);
static struct cls_match *insert_rule(struct classifier *cls,
                                     struct cls_subtable *, struct cls_rule *)
    OVS_REQUIRES(cls->mutex);

static struct cls_match *find_match_wc(const struct cls_subtable *,
                                       const struct flow *, struct trie_ctx *,
                                       unsigned int n_tries,
                                       struct flow_wildcards *);
static struct cls_match *find_equal(struct cls_subtable *,
                                    const struct miniflow *, uint32_t hash);

/* Iterates RULE over HEAD and all of the cls_rules on HEAD->list.
 * Classifier's mutex must be held while iterating, as the list is
 * protected by it. */
#define FOR_EACH_RULE_IN_LIST(RULE, HEAD)                               \
    for ((RULE) = (HEAD); (RULE) != NULL; (RULE) = next_rule_in_list(RULE))
#define FOR_EACH_RULE_IN_LIST_SAFE(RULE, NEXT, HEAD)                    \
    for ((RULE) = (HEAD);                                               \
         (RULE) != NULL && ((NEXT) = next_rule_in_list(RULE), true);    \
         (RULE) = (NEXT))
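
/* Illustrative usage (not from the original source): the _SAFE variant
 * permits removing the current node while iterating, e.g.:
 *
 *     struct cls_match *rule, *next;
 *
 *     FOR_EACH_RULE_IN_LIST_SAFE (rule, next, head) {
 *         if (should_remove(rule)) {    // 'should_remove' is hypothetical.
 *             list_remove(&rule->list);
 *         }
 *     }
 */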

static struct cls_match *next_rule_in_list__(struct cls_match *);
static struct cls_match *next_rule_in_list(struct cls_match *);

static unsigned int minimask_get_prefix_len(const struct minimask *,
                                            const struct mf_field *);
static void trie_init(struct classifier *cls, int trie_idx,
                      const struct mf_field *)
    OVS_REQUIRES(cls->mutex);
static unsigned int trie_lookup(const struct cls_trie *, const struct flow *,
                                union mf_value *plens);
static unsigned int trie_lookup_value(const rcu_trie_ptr *,
                                      const ovs_be32 value[], ovs_be32 plens[],
                                      unsigned int value_bits);
static void trie_destroy(rcu_trie_ptr *);
static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen);
static void trie_insert_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
                               int mlen);
static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen);
static void trie_remove_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
                               int mlen);
static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs,
                                 unsigned int n_bits);
static bool mask_prefix_bits_set(const struct flow_wildcards *,
                                 uint8_t be32ofs, unsigned int n_bits);
\f
/* flow/miniflow/minimask/minimatch utilities.
 * These are only used by the classifier, so place them here to allow
 * for better optimization. */

static inline uint64_t
miniflow_get_map_in_range(const struct miniflow *miniflow,
                          uint8_t start, uint8_t end, unsigned int *offset)
{
    uint64_t map = miniflow->map;
    *offset = 0;

    if (start > 0) {
        uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
        *offset = count_1bits(map & msk);
        map &= ~msk;
    }
    if (end < FLOW_U32S) {
        uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
        map &= msk;
    }
    return map;
}
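
/* Illustrative example (not from the original source): with map = 0b101101,
 * start = 2 and end = 5, the first branch clears the two low bits and sets
 * '*offset' to count_1bits(0b01) = 1 (the number of stored values to skip),
 * and the second branch drops bit 5, returning 0b001100: only the u32 words
 * at indices 2 and 3 fall within [start, end). */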

/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * miniflow_hash_in_minimask(), only the form of the arguments differs. */
static inline uint32_t
flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
                      uint32_t basis)
{
    const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
    const uint32_t *flow_u32 = (const uint32_t *) flow;
    const uint32_t *p = mask_values;
    uint32_t hash;
    uint64_t map;

    hash = basis;
    for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
        hash = hash_add(hash, flow_u32[raw_ctz(map)] & *p++);
    }

    return hash_finish(hash, (p - mask_values) * 4);
}

/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * flow_hash_in_minimask(), only the form of the arguments differs. */
static inline uint32_t
miniflow_hash_in_minimask(const struct miniflow *flow,
                          const struct minimask *mask, uint32_t basis)
{
    const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
    const uint32_t *p = mask_values;
    uint32_t hash = basis;
    uint32_t flow_u32;

    MINIFLOW_FOR_EACH_IN_MAP(flow_u32, flow, mask->masks.map) {
        hash = hash_add(hash, flow_u32 & *p++);
    }

    return hash_finish(hash, (p - mask_values) * 4);
}

/* Returns a hash value for the bits of range [start, end) in 'flow',
 * where there are 1-bits in 'mask', given 'hash'.
 *
 * The hash values returned by this function are the same as those returned by
 * minimatch_hash_range(), only the form of the arguments differs. */
static inline uint32_t
flow_hash_in_minimask_range(const struct flow *flow,
                            const struct minimask *mask,
                            uint8_t start, uint8_t end, uint32_t *basis)
{
    const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
    const uint32_t *flow_u32 = (const uint32_t *) flow;
    unsigned int offset;
    uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
                                             &offset);
    const uint32_t *p = mask_values + offset;
    uint32_t hash = *basis;

    for (; map; map = zero_rightmost_1bit(map)) {
        hash = hash_add(hash, flow_u32[raw_ctz(map)] & *p++);
    }

    *basis = hash; /* Allow continuation from the unfinished value. */
    return hash_finish(hash, (p - mask_values) * 4);
}

/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
static inline void
flow_wildcards_fold_minimask(struct flow_wildcards *wc,
                             const struct minimask *mask)
{
    flow_union_with_miniflow(&wc->masks, &mask->masks);
}

/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
 * in range [start, end). */
static inline void
flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
                                   const struct minimask *mask,
                                   uint8_t start, uint8_t end)
{
    uint32_t *dst_u32 = (uint32_t *) &wc->masks;
    unsigned int offset;
    uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
                                             &offset);
    const uint32_t *p = miniflow_get_u32_values(&mask->masks) + offset;

    for (; map; map = zero_rightmost_1bit(map)) {
        dst_u32[raw_ctz(map)] |= *p++;
    }
}

/* Returns a hash value for 'flow', given 'basis'. */
static inline uint32_t
miniflow_hash(const struct miniflow *flow, uint32_t basis)
{
    const uint32_t *values = miniflow_get_u32_values(flow);
    const uint32_t *p = values;
    uint32_t hash = basis;
    uint64_t hash_map = 0;
    uint64_t map;

    for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
        if (*p) {
            hash = hash_add(hash, *p);
            hash_map |= rightmost_1bit(map);
        }
        p++;
    }
    hash = hash_add(hash, hash_map);
    hash = hash_add(hash, hash_map >> 32);

    return hash_finish(hash, p - values);
}

/* Returns a hash value for 'mask', given 'basis'. */
static inline uint32_t
minimask_hash(const struct minimask *mask, uint32_t basis)
{
    return miniflow_hash(&mask->masks, basis);
}

/* Returns a hash value for 'match', given 'basis'. */
static inline uint32_t
minimatch_hash(const struct minimatch *match, uint32_t basis)
{
    return miniflow_hash(&match->flow, minimask_hash(&match->mask, basis));
}

/* Returns a hash value for the bits of range [start, end) in 'minimatch',
 * given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * flow_hash_in_minimask_range(), only the form of the arguments differs. */
static inline uint32_t
minimatch_hash_range(const struct minimatch *match, uint8_t start, uint8_t end,
                     uint32_t *basis)
{
    unsigned int offset;
    const uint32_t *p, *q;
    uint32_t hash = *basis;
    int n, i;

    n = count_1bits(miniflow_get_map_in_range(&match->mask.masks, start, end,
                                              &offset));
    q = miniflow_get_u32_values(&match->mask.masks) + offset;
    p = miniflow_get_u32_values(&match->flow) + offset;

    for (i = 0; i < n; i++) {
        hash = hash_add(hash, p[i] & q[i]);
    }
    *basis = hash; /* Allow continuation from the unfinished value. */
    return hash_finish(hash, (offset + n) * 4);
}
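
/* Illustrative note (not from the original source): because the *_range()
 * hash helpers store the unfinished value in '*basis', hashing a minimatch
 * segment by segment yields the same final value as hashing the whole range
 * in one call.  Sketch:
 *
 *     uint32_t basis = 0, h;
 *
 *     minimatch_hash_range(match, 0, 8, &basis);       // fields [0,8)
 *     h = minimatch_hash_range(match, 8, FLOW_U32S, &basis);
 *     // 'h' equals a single minimatch_hash_range(match, 0, FLOW_U32S, ...)
 *
 * The staged lookup indices rely on exactly this property. */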

\f
/* cls_rule. */

/* Initializes 'rule' to match packets specified by 'match' at the given
 * 'priority'.  'match' must satisfy the invariant described in the comment at
 * the definition of struct match.
 *
 * The caller must eventually destroy 'rule' with cls_rule_destroy().
 *
 * (OpenFlow uses priorities between 0 and UINT16_MAX, inclusive, but
 * internally Open vSwitch supports a wider range.) */
void
cls_rule_init(struct cls_rule *rule,
              const struct match *match, unsigned int priority)
{
    minimatch_init(&rule->match, match);
    rule->priority = priority;
    rule->cls_match = NULL;
}

/* Same as cls_rule_init() for initialization from a "struct minimatch". */
void
cls_rule_init_from_minimatch(struct cls_rule *rule,
                             const struct minimatch *match,
                             unsigned int priority)
{
    minimatch_clone(&rule->match, match);
    rule->priority = priority;
    rule->cls_match = NULL;
}

/* Initializes 'dst' as a copy of 'src'.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_clone(struct cls_rule *dst, const struct cls_rule *src)
{
    minimatch_clone(&dst->match, &src->match);
    dst->priority = src->priority;
    dst->cls_match = NULL;
}

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_move(struct cls_rule *dst, struct cls_rule *src)
{
    minimatch_move(&dst->match, &src->match);
    dst->priority = src->priority;
    dst->cls_match = NULL;
}

/* Frees memory referenced by 'rule'.  Doesn't free 'rule' itself (it's
 * normally embedded into a larger structure).
 *
 * ('rule' must not currently be in a classifier.) */
void
cls_rule_destroy(struct cls_rule *rule)
{
    ovs_assert(!rule->cls_match);
    minimatch_destroy(&rule->match);
}

/* Returns true if 'a' and 'b' match the same packets at the same priority,
 * false if they differ in some way. */
bool
cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b)
{
    return a->priority == b->priority && minimatch_equal(&a->match, &b->match);
}

/* Returns a hash value for 'rule', folding in 'basis'. */
uint32_t
cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
{
    return minimatch_hash(&rule->match, hash_int(rule->priority, basis));
}

/* Appends a string describing 'rule' to 's'. */
void
cls_rule_format(const struct cls_rule *rule, struct ds *s)
{
    minimatch_format(&rule->match, s, rule->priority);
}

/* Returns true if 'rule' matches every packet, false otherwise. */
bool
cls_rule_is_catchall(const struct cls_rule *rule)
{
    return minimask_is_catchall(&rule->match.mask);
}
\f
/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
void
classifier_init(struct classifier *cls, const uint8_t *flow_segments)
    OVS_EXCLUDED(cls->mutex)
{
    ovs_mutex_init(&cls->mutex);
    ovs_mutex_lock(&cls->mutex);
    cls->n_rules = 0;
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
    cmap_init(&cls->partitions);
    cls->n_flow_segments = 0;
    if (flow_segments) {
        while (cls->n_flow_segments < CLS_MAX_INDICES
               && *flow_segments < FLOW_U32S) {
            cls->flow_segments[cls->n_flow_segments++] = *flow_segments++;
        }
    }
    cls->n_tries = 0;
    for (int i = 0; i < CLS_MAX_TRIES; i++) {
        trie_init(cls, i, NULL);
    }
    ovs_mutex_unlock(&cls->mutex);
}
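
/* Illustrative usage (not from the original source): 'flow_segments' is an
 * ascending list of u32 offsets into "struct flow", terminated by any value
 * >= FLOW_U32S (or limited to CLS_MAX_INDICES entries).  For example, a
 * hypothetical single segment boundary at the first L3 word:
 *
 *     static const uint8_t segments[] = {
 *         offsetof(struct flow, nw_src) / 4,   // start of the L3 fields
 *         FLOW_U32S                            // terminator
 *     };
 *     struct classifier cls;
 *
 *     classifier_init(&cls, segments);
 */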

/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
void
classifier_destroy(struct classifier *cls)
    OVS_EXCLUDED(cls->mutex)
{
    if (cls) {
        struct cls_partition *partition;
        struct cls_subtable *subtable;
        int i;

        ovs_mutex_lock(&cls->mutex);
        for (i = 0; i < cls->n_tries; i++) {
            trie_destroy(&cls->tries[i].root);
        }

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);

        CMAP_FOR_EACH (partition, cmap_node, &cls->partitions) {
            ovsrcu_postpone(free, partition);
        }
        cmap_destroy(&cls->partitions);

        pvector_destroy(&cls->subtables);
        ovs_mutex_unlock(&cls->mutex);
        ovs_mutex_destroy(&cls->mutex);
    }
}

/* Set the fields for which prefix lookup should be performed. */
bool
classifier_set_prefix_fields(struct classifier *cls,
                             const enum mf_field_id *trie_fields,
                             unsigned int n_fields)
    OVS_EXCLUDED(cls->mutex)
{
    const struct mf_field *new_fields[CLS_MAX_TRIES];
    struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
    int i, n_tries = 0;
    bool changed = false;

    ovs_mutex_lock(&cls->mutex);
    for (i = 0; i < n_fields && n_tries < CLS_MAX_TRIES; i++) {
        const struct mf_field *field = mf_from_id(trie_fields[i]);
        if (field->flow_be32ofs < 0 || field->n_bits % 32) {
            /* Incompatible field.  This is the only place where we
             * enforce these requirements, but the rest of the trie code
             * depends on the flow_be32ofs to be non-negative and the
             * field length to be a multiple of 32 bits. */
            continue;
        }

        if (bitmap_is_set(fields.bm, trie_fields[i])) {
            /* Duplicate field, there is no need to build more than
             * one index for any one field. */
            continue;
        }
        bitmap_set1(fields.bm, trie_fields[i]);

        new_fields[n_tries] = NULL;
        if (n_tries >= cls->n_tries || field != cls->tries[n_tries].field) {
            new_fields[n_tries] = field;
            changed = true;
        }
        n_tries++;
    }

    if (changed || n_tries < cls->n_tries) {
        struct cls_subtable *subtable;

        /* Trie configuration needs to change.  Disable trie lookups
         * for the tries that are changing and wait for all the current
         * readers with the old configuration to be done. */
        changed = false;
        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            for (i = 0; i < cls->n_tries; i++) {
                if ((i < n_tries && new_fields[i]) || i >= n_tries) {
                    if (subtable->trie_plen[i]) {
                        subtable->trie_plen[i] = 0;
                        changed = true;
                    }
                }
            }
        }
        /* Synchronize if any readers were using tries.  The readers may
         * temporarily function without the trie lookup based optimizations. */
        if (changed) {
            /* ovsrcu_synchronize() functions as a memory barrier, so it does
             * not matter that subtable->trie_plen is not atomic. */
            ovsrcu_synchronize();
        }

        /* Now set up the tries. */
        for (i = 0; i < n_tries; i++) {
            if (new_fields[i]) {
                trie_init(cls, i, new_fields[i]);
            }
        }
        /* Destroy the rest, if any. */
        for (; i < cls->n_tries; i++) {
            trie_init(cls, i, NULL);
        }

        cls->n_tries = n_tries;
        ovs_mutex_unlock(&cls->mutex);
        return true;
    }

    ovs_mutex_unlock(&cls->mutex);
    return false; /* No change. */
}
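
/* Illustrative usage (not from the original source): enabling prefix tries
 * on the IPv4 destination and source fields:
 *
 *     static const enum mf_field_id trie_fields[] = {
 *         MFF_IPV4_DST, MFF_IPV4_SRC
 *     };
 *
 *     classifier_set_prefix_fields(&cls, trie_fields,
 *                                  ARRAY_SIZE(trie_fields));
 */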

static void
trie_init(struct classifier *cls, int trie_idx, const struct mf_field *field)
    OVS_REQUIRES(cls->mutex)
{
    struct cls_trie *trie = &cls->tries[trie_idx];
    struct cls_subtable *subtable;

    if (trie_idx < cls->n_tries) {
        trie_destroy(&trie->root);
    } else {
        ovsrcu_set_hidden(&trie->root, NULL);
    }
    trie->field = field;

    /* Add existing rules to the new trie. */
    CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
        unsigned int plen;

        plen = field ? minimask_get_prefix_len(&subtable->mask, field) : 0;
        if (plen) {
            struct cls_match *head;

            CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
                struct cls_match *match;

                FOR_EACH_RULE_IN_LIST (match, head) {
                    trie_insert(trie, match->cls_rule, plen);
                }
            }
        }
        /* Initialize subtable's prefix length on this field.  This will
         * allow readers to use the trie. */
        atomic_thread_fence(memory_order_release);
        subtable->trie_plen[trie_idx] = plen;
    }
}

/* Returns true if 'cls' contains no classification rules, false otherwise.
 * Checking the cmap requires no locking. */
bool
classifier_is_empty(const struct classifier *cls)
{
    return cmap_is_empty(&cls->subtables_map);
}

/* Returns the number of rules in 'cls'. */
int
classifier_count(const struct classifier *cls)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* n_rules is an int, so in the presence of concurrent writers this will
     * return either the old or a new value. */
    return cls->n_rules;
}

static uint32_t
hash_metadata(ovs_be64 metadata_)
{
    uint64_t metadata = (OVS_FORCE uint64_t) metadata_;
    return hash_uint64(metadata);
}

static struct cls_partition *
find_partition(const struct classifier *cls, ovs_be64 metadata, uint32_t hash)
{
    struct cls_partition *partition;

    CMAP_FOR_EACH_WITH_HASH (partition, cmap_node, hash, &cls->partitions) {
        if (partition->metadata == metadata) {
            return partition;
        }
    }

    return NULL;
}

static struct cls_partition *
create_partition(struct classifier *cls, struct cls_subtable *subtable,
                 ovs_be64 metadata)
    OVS_REQUIRES(cls->mutex)
{
    uint32_t hash = hash_metadata(metadata);
    struct cls_partition *partition = find_partition(cls, metadata, hash);
    if (!partition) {
        partition = xmalloc(sizeof *partition);
        partition->metadata = metadata;
        partition->tags = 0;
        tag_tracker_init(&partition->tracker);
        cmap_insert(&cls->partitions, &partition->cmap_node, hash);
    }
    tag_tracker_add(&partition->tracker, &partition->tags, subtable->tag);
    return partition;
}

static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
{
    /* Could optimize to use the same map if needed for fast path. */
    return MINIFLOW_GET_BE32(&match->flow, tp_src)
        & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
}
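
/* Illustrative example (not from the original source): MINIFLOW_GET_BE32 on
 * 'tp_src' reads the 32-bit word that holds both tp_src and tp_dst, so the
 * returned value is both ports with the wildcarded bits forced to zero.
 * E.g., for a rule matching only tp_dst=80 (tp_src wildcarded), in network
 * byte order:
 *
 *     flow word:  0x12340050   // tp_src = 0x1234 (don't care), tp_dst = 80
 *     mask word:  0x0000ffff   // only tp_dst exact-matched
 *     result:     0x00000050
 */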

/* Inserts 'rule' into 'cls'.  Until 'rule' is removed from 'cls', the caller
 * must not modify or free it.
 *
 * If 'cls' already contains an identical rule (including wildcards, values of
 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
 * rule that was replaced.  The caller takes ownership of the returned rule and
 * is thus responsible for destroying it with cls_rule_destroy(), freeing the
 * memory block in which it resides, etc., as necessary.
 *
 * Returns NULL if 'cls' does not contain a rule with an identical key, after
 * inserting the new rule.  In this case, no rules are displaced by the new
 * rule, even rules that cannot have any effect because the new rule matches a
 * superset of their flows and has higher priority. */
struct cls_rule *
classifier_replace(struct classifier *cls, struct cls_rule *rule)
    OVS_EXCLUDED(cls->mutex)
{
    struct cls_match *old_rule;
    struct cls_subtable *subtable;
    struct cls_rule *old_cls_rule = NULL;

    ovs_mutex_lock(&cls->mutex);
    subtable = find_subtable(cls, &rule->match.mask);
    if (!subtable) {
        subtable = insert_subtable(cls, &rule->match.mask);
    }

    old_rule = insert_rule(cls, subtable, rule);
    if (!old_rule) {
        old_cls_rule = NULL;

        rule->cls_match->partition = NULL;
        if (minimask_get_metadata_mask(&rule->match.mask) == OVS_BE64_MAX) {
            ovs_be64 metadata = miniflow_get_metadata(&rule->match.flow);
            rule->cls_match->partition = create_partition(cls, subtable,
                                                          metadata);
        }

        cls->n_rules++;

        for (int i = 0; i < cls->n_tries; i++) {
            if (subtable->trie_plen[i]) {
                trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]);
            }
        }

        /* Ports trie. */
        if (subtable->ports_mask_len) {
            /* We mask the value to be inserted to always have the wildcarded
             * bits in known (zero) state, so we can include them in comparison
             * and they will always match (== their original value does not
             * matter). */
            ovs_be32 masked_ports = minimatch_get_ports(&rule->match);

            trie_insert_prefix(&subtable->ports_trie, &masked_ports,
                               subtable->ports_mask_len);
        }
    } else {
        old_cls_rule = old_rule->cls_rule;
        rule->cls_match->partition = old_rule->partition;
        old_cls_rule->cls_match = NULL;

        /* 'old_rule' contains a cmap_node, which may not be freed
         * immediately. */
        ovsrcu_postpone(free, old_rule);
    }
    ovs_mutex_unlock(&cls->mutex);
    return old_cls_rule;
}

/* Inserts 'rule' into 'cls'.  Until 'rule' is removed from 'cls', the caller
 * must not modify or free it.
 *
 * 'cls' must not contain an identical rule (including wildcards, values of
 * fixed fields, and priority).  Use classifier_find_rule_exactly() to find
 * such a rule. */
void
classifier_insert(struct classifier *cls, struct cls_rule *rule)
{
    struct cls_rule *displaced_rule = classifier_replace(cls, rule);
    ovs_assert(!displaced_rule);
}
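
/* Illustrative usage (not from the original source): a typical writer-side
 * sequence, where 'my_rule' is a hypothetical structure embedding a
 * cls_rule as 'cr':
 *
 *     struct match match;
 *     struct cls_rule *old;
 *
 *     match_init_catchall(&match);
 *     match_set_dl_type(&match, htons(ETH_TYPE_IP));
 *     cls_rule_init(&my_rule->cr, &match, OFP_DEFAULT_PRIORITY);
 *
 *     old = classifier_replace(&cls, &my_rule->cr);
 *     if (old) {
 *         // 'old' is out of the classifier; destroy it only when no
 *         // reader can still hold a reference to it.
 *         cls_rule_destroy(old);
 *     }
 */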

/* Removes 'rule' from 'cls'.  It is the caller's responsibility to destroy
 * 'rule' with cls_rule_destroy(), freeing the memory block in which 'rule'
 * resides, etc., as necessary.
 *
 * Does nothing if 'rule' has already been removed, or was never inserted.
 *
 * Returns the removed rule, or NULL, if it was already removed.
 */
struct cls_rule *
classifier_remove(struct classifier *cls, struct cls_rule *rule)
    OVS_EXCLUDED(cls->mutex)
{
    struct cls_partition *partition;
    struct cls_match *cls_match;
    struct cls_match *head;
    struct cls_subtable *subtable;
    int i;
    uint32_t basis = 0, hash, ihash[CLS_MAX_INDICES];
    uint8_t prev_be32ofs = 0;

    ovs_mutex_lock(&cls->mutex);
    cls_match = rule->cls_match;
    if (!cls_match) {
        rule = NULL;
        goto unlock; /* Already removed. */
    }

    subtable = find_subtable(cls, &rule->match.mask);
    ovs_assert(subtable);

    if (subtable->ports_mask_len) {
        ovs_be32 masked_ports = minimatch_get_ports(&rule->match);

        trie_remove_prefix(&subtable->ports_trie,
                           &masked_ports, subtable->ports_mask_len);
    }
    for (i = 0; i < cls->n_tries; i++) {
        if (subtable->trie_plen[i]) {
            trie_remove(&cls->tries[i], rule, subtable->trie_plen[i]);
        }
    }

    /* Remove rule node from indices. */
    for (i = 0; i < subtable->n_indices; i++) {
        ihash[i] = minimatch_hash_range(&rule->match, prev_be32ofs,
                                        subtable->index_ofs[i], &basis);
        cmap_remove(&subtable->indices[i], &cls_match->index_nodes[i],
                    ihash[i]);
        prev_be32ofs = subtable->index_ofs[i];
    }
    hash = minimatch_hash_range(&rule->match, prev_be32ofs, FLOW_U32S, &basis);

    head = find_equal(subtable, &rule->match.flow, hash);
    if (head != cls_match) {
        list_remove(&cls_match->list);
    } else if (list_is_empty(&cls_match->list)) {
        cmap_remove(&subtable->rules, &cls_match->cmap_node, hash);
    } else {
        struct cls_match *next = CONTAINER_OF(cls_match->list.next,
                                              struct cls_match, list);

        list_remove(&cls_match->list);
        cmap_replace(&subtable->rules, &cls_match->cmap_node,
                     &next->cmap_node, hash);
    }

    partition = cls_match->partition;
    if (partition) {
        tag_tracker_subtract(&partition->tracker, &partition->tags,
                             subtable->tag);
        if (!partition->tags) {
            cmap_remove(&cls->partitions, &partition->cmap_node,
                        hash_metadata(partition->metadata));
            ovsrcu_postpone(free, partition);
        }
    }

    if (--subtable->n_rules == 0) {
        destroy_subtable(cls, subtable);
    } else if (subtable->max_priority == cls_match->priority
               && --subtable->max_count == 0) {
        /* Find the new 'max_priority' and 'max_count'. */
        struct cls_match *head;
        unsigned int max_priority = 0;

        CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
            if (head->priority > max_priority) {
                max_priority = head->priority;
                subtable->max_count = 1;
            } else if (head->priority == max_priority) {
                ++subtable->max_count;
            }
        }
        subtable->max_priority = max_priority;
        pvector_change_priority(&cls->subtables, subtable, max_priority);
    }

    cls->n_rules--;

    ovsrcu_postpone(free, cls_match);
    rule->cls_match = NULL;
unlock:
    ovs_mutex_unlock(&cls->mutex);

    return rule;
}

/* Prefix tree context.  Valid when 'lookup_done' is true.  Can skip all
 * subtables which have a prefix match on the trie field, but whose prefix
 * length is not indicated in 'match_plens'.  For example, a subtable that
 * has an 8-bit trie field prefix match can be skipped if
 * !be_get_bit_at(&match_plens, 8 - 1).  If skipped, 'maskbits' prefix bits
 * must be unwildcarded to make the datapath flow only match packets it
 * should. */
struct trie_ctx {
    const struct cls_trie *trie;
    bool lookup_done;        /* Status of the lookup. */
    uint8_t be32ofs;         /* U32 offset of the field in question. */
    unsigned int maskbits;   /* Prefix length needed to avoid false matches. */
    union mf_value match_plens; /* Bitmask of prefix lengths with possible
                                 * matches. */
};

static void
trie_ctx_init(struct trie_ctx *ctx, const struct cls_trie *trie)
{
    ctx->trie = trie;
    ctx->be32ofs = trie->field->flow_be32ofs;
    ctx->lookup_done = false;
}

/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
 * Returns a null pointer if no rules in 'cls' match 'flow'.  If multiple rules
 * of equal priority match 'flow', returns one arbitrarily.
 *
 * If a rule is found and 'wc' is non-null, bitwise-OR's 'wc' with the
 * set of bits that were significant in the lookup.  At some point
 * earlier, 'wc' should have been initialized (e.g., by
 * flow_wildcards_init_catchall()). */
struct cls_rule *
classifier_lookup(const struct classifier *cls, const struct flow *flow,
                  struct flow_wildcards *wc)
{
    const struct cls_partition *partition;
    tag_type tags;
    int64_t best_priority = -1;
    const struct cls_match *best;
    struct trie_ctx trie_ctx[CLS_MAX_TRIES];
    struct cls_subtable *subtable;

    /* Synchronize for cls->n_tries and subtable->trie_plen.  They can change
     * when table configuration changes, which happens typically only on
     * startup. */
    atomic_thread_fence(memory_order_acquire);

    /* Determine 'tags' such that, if 'subtable->tag' doesn't intersect them,
     * then 'flow' cannot possibly match in 'subtable':
     *
     *     - If flow->metadata maps to a given 'partition', then we can use
     *       'partition->tags' for 'tags'.
     *
     *     - If flow->metadata has no partition, then no rule in 'cls' has an
     *       exact-match for flow->metadata.  That means that we don't need to
     *       search any subtable that includes flow->metadata in its mask.
     *
     * In either case, we always need to search any cls_subtables that do not
     * include flow->metadata in their masks.  One way to do that would be to
     * check the "cls_subtable"s explicitly for that, but that would require an
     * extra branch per subtable.  Instead, we mark such a cls_subtable's
     * 'tags' as TAG_ALL and make sure that 'tags' is never empty.  This means
     * that 'tags' always intersects such a cls_subtable's 'tags', so we don't
     * need a special case.
     */
    partition = (cmap_is_empty(&cls->partitions)
                 ? NULL
                 : find_partition(cls, flow->metadata,
                                  hash_metadata(flow->metadata)));
    tags = partition ? partition->tags : TAG_ARBITRARY;

    /* Initialize trie contexts for find_match_wc(). */
    for (int i = 0; i < cls->n_tries; i++) {
        trie_ctx_init(&trie_ctx[i], &cls->tries[i]);
    }

    best = NULL;
    PVECTOR_FOR_EACH_PRIORITY(subtable, best_priority, 2,
                              sizeof(struct cls_subtable), &cls->subtables) {
        struct cls_match *rule;

        if (!tag_intersects(tags, subtable->tag)) {
            continue;
        }

        rule = find_match_wc(subtable, flow, trie_ctx, cls->n_tries, wc);
        if (rule && (int64_t)rule->priority > best_priority) {
            best_priority = (int64_t)rule->priority;
            best = rule;
        }
    }

    return best ? best->cls_rule : NULL;
}
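
/* Illustrative usage (not from the original source): a lookup that also
 * collects the wildcards needed for installing a datapath megaflow:
 *
 *     struct flow_wildcards wc;
 *     struct cls_rule *rule;
 *
 *     flow_wildcards_init_catchall(&wc);
 *     rule = classifier_lookup(&cls, &flow, &wc);
 *     if (rule) {
 *         // wc.masks now covers every bit the lookup depended on, so a
 *         // flow cache entry masked by it is safe to install.
 *     }
 */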

/* Returns true if 'target' satisfies 'match', that is, if each bit for which
 * 'match' specifies a particular value has the correct value in 'target'.
 *
 * 'flow' and 'mask' have the same map! */
static bool
miniflow_and_mask_matches_miniflow(const struct miniflow *flow,
                                   const struct minimask *mask,
                                   const struct miniflow *target)
{
    const uint32_t *flowp = miniflow_get_u32_values(flow);
    const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
    uint32_t target_u32;

    MINIFLOW_FOR_EACH_IN_MAP(target_u32, target, mask->masks.map) {
        if ((*flowp++ ^ target_u32) & *maskp++) {
            return false;
        }
    }

    return true;
}

/* For each miniflow in 'flows', performs a classifier lookup, writing the
 * result into the corresponding slot in 'rules'.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function.  Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow.
 *
 * Returns true if all flows found a corresponding rule. */
bool
classifier_lookup_miniflow_batch(const struct classifier *cls,
                                 const struct miniflow *flows[],
                                 struct cls_rule *rules[], const size_t cnt)
{
    /* The batch size 16 was experimentally found faster than 8 or 32. */
    typedef uint16_t map_type;
#define MAP_BITS (sizeof(map_type) * CHAR_BIT)

    struct cls_subtable *subtable;
    const int n_maps = DIV_ROUND_UP(cnt, MAP_BITS);

#if !defined(__CHECKER__) && !defined(_WIN32)
    map_type maps[n_maps];
#else
    map_type maps[DIV_ROUND_UP(CLASSIFIER_MAX_BATCH, MAP_BITS)];
    ovs_assert(n_maps <= CLASSIFIER_MAX_BATCH);
#endif
    BUILD_ASSERT_DECL(sizeof *maps * CHAR_BIT == MAP_BITS);

    memset(maps, 0xff, sizeof maps);
    if (cnt % MAP_BITS) {
        maps[n_maps - 1] >>= MAP_BITS - cnt % MAP_BITS; /* Clear extra bits. */
    }
    memset(rules, 0, cnt * sizeof *rules);

    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        const struct miniflow **mfs = flows;
        struct cls_rule **results = rules;
        map_type remains = 0;
        int m;

        BUILD_ASSERT_DECL(sizeof remains == sizeof *maps);

        for (m = 0; m < n_maps; m++, mfs += MAP_BITS, results += MAP_BITS) {
            uint32_t hashes[MAP_BITS];
            const struct cmap_node *nodes[MAP_BITS];
            unsigned long map = maps[m];
            int i;

            if (!map) {
                continue; /* Skip empty maps. */
            }

            /* Compute hashes for the unfound flows. */
            ULONG_FOR_EACH_1(i, map) {
                hashes[i] = miniflow_hash_in_minimask(mfs[i], &subtable->mask,
                                                      0);
            }
            /* Lookup. */
            map = cmap_find_batch(&subtable->rules, map, hashes, nodes);
            /* Check results. */
            ULONG_FOR_EACH_1(i, map) {
                struct cls_match *rule;

                CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
                    if (OVS_LIKELY(miniflow_and_mask_matches_miniflow(
                                       &rule->flow, &subtable->mask,
                                       mfs[i]))) {
                        results[i] = rule->cls_rule;
                        goto next;
                    }
                }
                ULONG_SET0(map, i); /* Did not match. */
            next:
                ; /* Keep Sparse happy. */
            }
            maps[m] &= ~map; /* Clear the found rules. */
            remains |= maps[m];
        }
        if (!remains) {
            return true; /* All found. */
        }
    }
    /* Some misses. */
    return false;
}
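
/* Illustrative note (not from the original source): the batch lookup keeps
 * one bit per flow in 'maps'; a set bit means "no match found yet".  Each
 * subtable pass hashes only the still-unmatched flows, probes the
 * subtable's cmap for all of them at once with cmap_find_batch(), and
 * clears the bits that found a match.  E.g., with cnt = 20 and
 * MAP_BITS = 16:
 *
 *     maps[0] = 0xffff, maps[1] = 0x000f   // 20 candidate flows
 *     // ... after a subtable pass that matched flows 0 and 17:
 *     maps[0] = 0xfffe, maps[1] = 0x000d
 */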

/* Finds and returns a rule in 'cls' with exactly the same priority and
 * matching criteria as 'target'.  Returns a null pointer if 'cls' doesn't
 * contain an exact match. */
struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
                             const struct cls_rule *target)
    OVS_EXCLUDED(cls->mutex)
{
    struct cls_match *head, *rule;
    struct cls_subtable *subtable;

    subtable = find_subtable(cls, &target->match.mask);
    if (!subtable || target->priority > subtable->max_priority) {
        return NULL;
    }

    head = find_equal(subtable, &target->match.flow,
                      miniflow_hash_in_minimask(&target->match.flow,
                                                &target->match.mask, 0));

    /* Use RCU list instead of locking when one is available. */
    ovs_mutex_lock(&cls->mutex);
    FOR_EACH_RULE_IN_LIST (rule, head) {
        if (target->priority >= rule->priority) {
            ovs_mutex_unlock(&cls->mutex);
            return target->priority == rule->priority ? rule->cls_rule : NULL;
        }
    }
    ovs_mutex_unlock(&cls->mutex);

    return NULL;
}

/* Finds and returns a rule in 'cls' with priority 'priority' and exactly the
 * same matching criteria as 'target'.  Returns a null pointer if 'cls' doesn't
 * contain an exact match. */
struct cls_rule *
classifier_find_match_exactly(const struct classifier *cls,
                              const struct match *target,
                              unsigned int priority)
{
    struct cls_rule *retval;
    struct cls_rule cr;

    cls_rule_init(&cr, target, priority);
    retval = classifier_find_rule_exactly(cls, &cr);
    cls_rule_destroy(&cr);

    return retval;
}

/* Checks if 'target' would overlap any other rule in 'cls'.  Two rules are
 * considered to overlap if both rules have the same priority and a packet
 * could match both. */
bool
classifier_rule_overlaps(const struct classifier *cls,
                         const struct cls_rule *target)
    OVS_EXCLUDED(cls->mutex)
{
    struct cls_subtable *subtable;
    int64_t stop_at_priority = (int64_t)target->priority - 1;

    ovs_mutex_lock(&cls->mutex);
    /* Iterate subtables in descending max priority order. */
    PVECTOR_FOR_EACH_PRIORITY (subtable, stop_at_priority, 2,
                               sizeof(struct cls_subtable), &cls->subtables) {
        uint32_t storage[FLOW_U32S];
        struct minimask mask;
        struct cls_match *head;

        minimask_combine(&mask, &target->match.mask, &subtable->mask, storage);
        CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
            struct cls_match *rule;

            FOR_EACH_RULE_IN_LIST (rule, head) {
                if (rule->priority < target->priority) {
                    break; /* Rules in descending priority order. */
                }
                if (rule->priority == target->priority
                    && miniflow_equal_in_minimask(&target->match.flow,
                                                  &rule->flow, &mask)) {
                    ovs_mutex_unlock(&cls->mutex);
                    return true;
                }
            }
        }
    }

    ovs_mutex_unlock(&cls->mutex);
    return false;
}

/* Returns true if 'rule' exactly matches 'criteria' or if 'rule' is more
 * specific than 'criteria'.  That is, 'rule' matches 'criteria' and this
 * function returns true if, for every field:
 *
 *     - 'criteria' and 'rule' specify the same (non-wildcarded) value for the
 *       field, or
 *
 *     - 'criteria' wildcards the field,
 *
 * Conversely, 'rule' does not match 'criteria' and this function returns false
 * if, for at least one field:
 *
 *     - 'criteria' and 'rule' specify different values for the field, or
 *
 *     - 'criteria' specifies a value for the field but 'rule' wildcards it.
 *
 * Equivalently, the truth table for whether a field matches is:
 *
 *                                  rule
 *
 *                          wildcard    exact
 *                         +---------+---------+
 *                wildcard |   yes   |   yes   |
 *     criteria            +---------+---------+
 *                  exact  |    no   |if values|
 *                         |         |are equal|
 *                         +---------+---------+
 *
 * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
 * commands and by OpenFlow 1.0 aggregate and flow stats.
 *
 * Ignores rule->priority. */
bool
cls_rule_is_loose_match(const struct cls_rule *rule,
                        const struct minimatch *criteria)
{
    return (!minimask_has_extra(&rule->match.mask, &criteria->mask)
            && miniflow_equal_in_minimask(&rule->match.flow, &criteria->flow,
                                          &criteria->mask));
}
\f
/* Iteration. */

static bool
rule_matches(const struct cls_match *rule, const struct cls_rule *target)
{
    return (!target
            || miniflow_equal_in_minimask(&rule->flow,
                                          &target->match.flow,
                                          &target->match.mask));
}

static struct cls_match *
search_subtable(const struct cls_subtable *subtable,
                struct cls_cursor *cursor)
{
    if (!cursor->target
        || !minimask_has_extra(&subtable->mask, &cursor->target->match.mask)) {
        struct cls_match *rule;

        CMAP_CURSOR_FOR_EACH (rule, cmap_node, &cursor->rules,
                              &subtable->rules) {
            if (rule_matches(rule, cursor->target)) {
                return rule;
            }
        }
    }
    return NULL;
}

/* Initializes 'cursor' for iterating through rules in 'cls'.  The returned
 * cursor's 'rule' member points to the first matching cls_rule, or is NULL
 * if there are no matches.
 *
 *     - If 'target' is null, the cursor will visit every rule in 'cls'.
 *
 *     - If 'target' is nonnull, the cursor will visit each 'rule' in 'cls'
 *       such that cls_rule_is_loose_match(rule, target) returns true.
 *
 * Ignores target->priority. */
struct cls_cursor cls_cursor_start(const struct classifier *cls,
                                   const struct cls_rule *target,
                                   bool safe)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct cls_cursor cursor;
    struct cls_subtable *subtable;

    cursor.safe = safe;
    cursor.cls = cls;
    cursor.target = target && !cls_rule_is_catchall(target) ? target : NULL;
    cursor.rule = NULL;

    /* Find first rule. */
    ovs_mutex_lock(&cursor.cls->mutex);
    CMAP_CURSOR_FOR_EACH (subtable, cmap_node, &cursor.subtables,
                          &cursor.cls->subtables_map) {
        struct cls_match *rule = search_subtable(subtable, &cursor);

        if (rule) {
            cursor.subtable = subtable;
            cursor.rule = rule->cls_rule;
            break;
        }
    }

    /* Leave the mutex locked if not requested safe and we have a rule. */
    if (safe || !cursor.rule) {
        ovs_mutex_unlock(&cursor.cls->mutex);
    }
    return cursor;
}

static struct cls_rule *
cls_cursor_next(struct cls_cursor *cursor)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct cls_match *rule = cursor->rule->cls_match;
    const struct cls_subtable *subtable;
    struct cls_match *next;

    next = next_rule_in_list__(rule);
    if (next->priority < rule->priority) {
        return next->cls_rule;
    }

    /* 'next' is the head of the list, that is, the rule that is included in
     * the subtable's map.  (This is important when the classifier contains
     * rules that differ only in priority.) */
    rule = next;
    CMAP_CURSOR_FOR_EACH_CONTINUE (rule, cmap_node, &cursor->rules) {
        if (rule_matches(rule, cursor->target)) {
            return rule->cls_rule;
        }
    }

    subtable = cursor->subtable;
    CMAP_CURSOR_FOR_EACH_CONTINUE (subtable, cmap_node, &cursor->subtables) {
        rule = search_subtable(subtable, cursor);
        if (rule) {
            cursor->subtable = subtable;
            return rule->cls_rule;
        }
    }

    return NULL;
}

/* Sets 'cursor->rule' to the next matching cls_rule in 'cursor''s iteration,
 * or to null if all matching rules have been visited. */
void
cls_cursor_advance(struct cls_cursor *cursor)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    if (cursor->safe) {
        ovs_mutex_lock(&cursor->cls->mutex);
    }
    cursor->rule = cls_cursor_next(cursor);
    if (cursor->safe || !cursor->rule) {
        ovs_mutex_unlock(&cursor->cls->mutex);
    }
}
\f
static struct cls_subtable *
find_subtable(const struct classifier *cls, const struct minimask *mask)
{
    struct cls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, minimask_hash(mask, 0),
                             &cls->subtables_map) {
        if (minimask_equal(mask, &subtable->mask)) {
            return subtable;
        }
    }
    return NULL;
}

/* The new subtable will be visible to the readers only after this. */
static struct cls_subtable *
insert_subtable(struct classifier *cls, const struct minimask *mask)
    OVS_REQUIRES(cls->mutex)
{
    uint32_t hash = minimask_hash(mask, 0);
    struct cls_subtable *subtable;
    int i, index = 0;
    struct flow_wildcards old, new;
    uint8_t prev;
    int count = count_1bits(mask->masks.map);

    subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
                       + MINIFLOW_VALUES_SIZE(count));
    cmap_init(&subtable->rules);
    miniflow_clone_inline(&subtable->mask.masks, &mask->masks, count);

    /* Init indices for segmented lookup, if any. */
    flow_wildcards_init_catchall(&new);
    old = new;
    prev = 0;
    for (i = 0; i < cls->n_flow_segments; i++) {
        flow_wildcards_fold_minimask_range(&new, mask, prev,
                                           cls->flow_segments[i]);
        /* Add an index if it adds mask bits. */
        if (!flow_wildcards_equal(&new, &old)) {
            cmap_init(&subtable->indices[index]);
            subtable->index_ofs[index] = cls->flow_segments[i];
            index++;
            old = new;
        }
        prev = cls->flow_segments[i];
    }
    /* Check if the rest of the subtable's mask adds any bits,
     * and remove the last index if it doesn't. */
    if (index > 0) {
        flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U32S);
        if (flow_wildcards_equal(&new, &old)) {
            --index;
            subtable->index_ofs[index] = 0;
            cmap_destroy(&subtable->indices[index]);
        }
    }
    subtable->n_indices = index;

    subtable->tag = (minimask_get_metadata_mask(mask) == OVS_BE64_MAX
                     ? tag_create_deterministic(hash)
                     : TAG_ALL);

    for (i = 0; i < cls->n_tries; i++) {
        subtable->trie_plen[i] = minimask_get_prefix_len(mask,
                                                         cls->tries[i].field);
    }

    /* Ports trie. */
    ovsrcu_set_hidden(&subtable->ports_trie, NULL);
    subtable->ports_mask_len
        = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));

    cmap_insert(&cls->subtables_map, &subtable->cmap_node, hash);

    return subtable;
}
1414
b5d97350 1415static void
e48eccd1 1416destroy_subtable(struct classifier *cls, struct cls_subtable *subtable)
e65413ab 1417 OVS_REQUIRES(cls->mutex)
b5d97350 1418{
476f36e8
JR
1419 int i;
1420
fe7cfa5c 1421 pvector_remove(&cls->subtables, subtable);
f358a2cb 1422 trie_destroy(&subtable->ports_trie);
69d6040e 1423
476f36e8 1424 for (i = 0; i < subtable->n_indices; i++) {
f2c21402 1425 cmap_destroy(&subtable->indices[i]);
476f36e8 1426 }
f2c21402
JR
1427 cmap_remove(&cls->subtables_map, &subtable->cmap_node,
1428 minimask_hash(&subtable->mask, 0));
fe7cfa5c 1429 minimask_destroy(&subtable->mask);
f2c21402 1430 cmap_destroy(&subtable->rules);
fe7cfa5c 1431 ovsrcu_postpone(free, subtable);
4aacd02d
BP
1432}

struct range {
    uint8_t start;
    uint8_t end;
};

static unsigned int be_get_bit_at(const ovs_be32 value[], unsigned int ofs);

/* Returns 'true' if the rest of the subtable can be skipped based on the
 * prefix trie lookup results. */
static inline bool
check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
            const unsigned int field_plen[CLS_MAX_TRIES],
            const struct range ofs, const struct flow *flow,
            struct flow_wildcards *wc)
{
    int j;

    /* Check if we could avoid fully unwildcarding the next level of
     * fields using the prefix tries.  The trie checks are done only as
     * needed to avoid folding in additional bits to the wildcards mask. */
    for (j = 0; j < n_tries; j++) {
        /* Is the trie field relevant for this subtable? */
        if (field_plen[j]) {
            struct trie_ctx *ctx = &trie_ctx[j];
            uint8_t be32ofs = ctx->be32ofs;

            /* Is the trie field within the current range of fields? */
            if (be32ofs >= ofs.start && be32ofs < ofs.end) {
                /* On-demand trie lookup. */
                if (!ctx->lookup_done) {
                    memset(&ctx->match_plens, 0, sizeof ctx->match_plens);
                    ctx->maskbits = trie_lookup(ctx->trie, flow,
                                                &ctx->match_plens);
                    ctx->lookup_done = true;
                }
                /* Possible to skip the rest of the subtable if subtable's
                 * prefix on the field is not included in the lookup result. */
                if (!be_get_bit_at(&ctx->match_plens.be32, field_plen[j] - 1)) {
                    /* We want the trie lookup to never result in unwildcarding
                     * any bits that would not be unwildcarded otherwise.
                     * Since the trie is shared by the whole classifier, it is
                     * possible that the 'maskbits' contain bits that are
                     * irrelevant for the partition relevant for the current
                     * packet.  Hence the checks below. */

                    /* Check that the trie result will not unwildcard more bits
                     * than this subtable would otherwise. */
                    if (ctx->maskbits <= field_plen[j]) {
                        /* Unwildcard the bits and skip the rest. */
                        mask_set_prefix_bits(wc, be32ofs, ctx->maskbits);
                        /* Note: Prerequisite already unwildcarded, as the only
                         * prerequisite of the supported trie lookup fields is
                         * the ethertype, which is always unwildcarded. */
                        return true;
                    }
                    /* Can skip if the field is already unwildcarded. */
                    if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) {
                        return true;
                    }
                }
            }
        }
    }
    return false;
}
1499
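/* Illustrative example (editorial addition, not part of the original source):
 * suppose a subtable matches a 16-bit prefix on the trie field, so
 * field_plen[j] == 16.  The trie lookup fills 'match_plens' with one bit per
 * prefix length that still has potentially matching rules.  If bit 15 (the
 * "/16" bit) is clear, no 16-bit-prefix rule can match this packet, and the
 * whole subtable is skipped, provided that doing so does not unwildcard more
 * bits than the subtable itself would ('maskbits' <= 16) or that the bits in
 * question are already unwildcarded in 'wc'. */
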
/* Returns true if 'target' satisfies 'flow'/'mask', that is, if for each bit
 * set in 'mask', 'target' and 'flow' have the same value in that bit.
 *
 * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
 * target, mask) but is faster because of the invariant that flow->map and
 * mask->masks.map are the same. */
static inline bool
miniflow_and_mask_matches_flow(const struct miniflow *flow,
                               const struct minimask *mask,
                               const struct flow *target)
{
    const uint32_t *flowp = miniflow_get_u32_values(flow);
    const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
    uint32_t idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        uint32_t diff = (*flowp++ ^ flow_u32_value(target, idx)) & *maskp++;

        if (diff) {
            return false;
        }
    }

    return true;
}

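/* Worked example (editorial addition): with a rule value 0xc0a80100
 * ("192.168.1.0"), a mask word 0xffffff00 (a "/24"), and a packet word
 * 0xc0a801fe ("192.168.1.254"):
 *
 *     (0xc0a80100 ^ 0xc0a801fe) & 0xffffff00 == 0x000000fe & 0xffffff00 == 0
 *
 * so the packet matches this word; any nonzero result would mean a mismatch
 * in a non-wildcarded bit. */
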
static inline struct cls_match *
find_match(const struct cls_subtable *subtable, const struct flow *flow,
           uint32_t hash)
{
    struct cls_match *rule;

    CMAP_FOR_EACH_WITH_HASH (rule, cmap_node, hash, &subtable->rules) {
        if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
                                           flow)) {
            return rule;
        }
    }

    return NULL;
}

/* Returns true if 'target' satisfies 'flow'/'mask', that is, if for each bit
 * set in 'mask', 'target' and 'flow' have the same value in that bit.
 *
 * This function is equivalent to miniflow_and_mask_matches_flow() but this
 * version also fills in the mask bits in 'wc'. */
static inline bool
miniflow_and_mask_matches_flow_wc(const struct miniflow *flow,
                                  const struct minimask *mask,
                                  const struct flow *target,
                                  struct flow_wildcards *wc)
{
    const uint32_t *flowp = miniflow_get_u32_values(flow);
    const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
    uint32_t idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        uint32_t mask = *maskp++;
        uint32_t diff = (*flowp++ ^ flow_u32_value(target, idx)) & mask;

        if (diff) {
            /* Only unwildcard if none of the differing bits is already
             * exact-matched. */
            if (!(flow_u32_value(&wc->masks, idx) & diff)) {
                /* Keep one bit of the difference. */
                *flow_u32_lvalue(&wc->masks, idx) |= rightmost_1bit(diff);
            }
            return false;
        }
        /* Fill in the bits that were looked at. */
        *flow_u32_lvalue(&wc->masks, idx) |= mask;
    }

    return true;
}

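/* Worked example (editorial addition): continuing the example above with a
 * packet word 0xc0a80200 ("192.168.2.0"), the masked difference is
 *
 *     (0xc0a80100 ^ 0xc0a80200) & 0xffffff00 == 0x00000300
 *
 * so the rule does not match.  Instead of folding the full mask word into
 * 'wc', only rightmost_1bit(0x00000300) == 0x00000100 is added: a single
 * differing bit is enough to make the resulting megaflow exclude this rule,
 * which keeps the megaflow as wide as possible. */
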
/* Unwildcard the fields looked up so far, if any. */
static void
fill_range_wc(const struct cls_subtable *subtable, struct flow_wildcards *wc,
              uint8_t to)
{
    if (to) {
        flow_wildcards_fold_minimask_range(wc, &subtable->mask, 0, to);
    }
}

static struct cls_match *
find_match_wc(const struct cls_subtable *subtable, const struct flow *flow,
              struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
              struct flow_wildcards *wc)
{
    uint32_t basis = 0, hash;
    struct cls_match *rule = NULL;
    int i;
    struct range ofs;

    if (OVS_UNLIKELY(!wc)) {
        return find_match(subtable, flow,
                          flow_hash_in_minimask(flow, &subtable->mask, 0));
    }

    ofs.start = 0;
    /* Try to finish early by checking fields in segments. */
    for (i = 0; i < subtable->n_indices; i++) {
        const struct cmap_node *inode;

        ofs.end = subtable->index_ofs[i];

        if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow,
                        wc)) {
            /* 'wc' bits for the trie field set, now unwildcard the preceding
             * bits used so far. */
            fill_range_wc(subtable, wc, ofs.start);
            return NULL;
        }
        hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
                                           ofs.end, &basis);
        inode = cmap_find(&subtable->indices[i], hash);
        if (!inode) {
            /* No match, can stop immediately, but must fold in the bits
             * used in lookup so far. */
            fill_range_wc(subtable, wc, ofs.end);
            return NULL;
        }

        /* If we have narrowed down to a single rule already, check whether
         * that rule matches.  Either way, we're done.
         *
         * (Rare) hash collisions may cause us to miss the opportunity for this
         * optimization. */
        if (!cmap_node_next(inode)) {
            ASSIGN_CONTAINER(rule, inode - i, index_nodes);
            if (miniflow_and_mask_matches_flow_wc(&rule->flow, &subtable->mask,
                                                  flow, wc)) {
                return rule;
            }
            return NULL;
        }
        ofs.start = ofs.end;
    }
    ofs.end = FLOW_U32S;
    /* Trie check for the final range. */
    if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow, wc)) {
        fill_range_wc(subtable, wc, ofs.start);
        return NULL;
    }
    hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
                                       ofs.end, &basis);
    rule = find_match(subtable, flow, hash);
    if (!rule && subtable->ports_mask_len) {
        /* Ports are always part of the final range, if any.
         * No match was found for the ports.  Use the ports trie to figure out
         * which ports bits to unwildcard. */
        unsigned int mbits;
        ovs_be32 value, plens, mask;

        mask = MINIFLOW_GET_BE32(&subtable->mask.masks, tp_src);
        value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
        mbits = trie_lookup_value(&subtable->ports_trie, &value, &plens, 32);

        ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
            mask & htonl(~0 << (32 - mbits));

        /* Unwildcard all bits in the mask up to the ports, as they were used
         * to determine there is no match. */
        fill_range_wc(subtable, wc, TP_PORTS_OFS32);
        return NULL;
    }

    /* Must unwildcard all the fields, as they were looked at. */
    flow_wildcards_fold_minimask(wc, &subtable->mask);
    return rule;
}

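/* Worked example (editorial addition): if the ports trie lookup above
 * returns mbits == 3, then
 *
 *     mask & htonl(~0 << (32 - 3)) == mask & htonl(0xe0000000)
 *
 * unwildcards at most the top three bits of the combined tp_src/tp_dst word
 * (intersected with the subtable's ports mask): the fewest bits that were
 * needed to establish that no port rule matched. */
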
static struct cls_match *
find_equal(struct cls_subtable *subtable, const struct miniflow *flow,
           uint32_t hash)
{
    struct cls_match *head;

    CMAP_FOR_EACH_WITH_HASH (head, cmap_node, hash, &subtable->rules) {
        if (miniflow_equal(&head->flow, flow)) {
            return head;
        }
    }
    return NULL;
}

/*
 * As the readers are operating concurrently with the modifications, a
 * concurrent reader may or may not see the new rule, depending on how
 * the concurrent events overlap with each other.  This is no
 * different from the former locked behavior, but there the visibility
 * of the new rule only depended on the timing of the locking
 * functions.
 *
 * The new rule is first added to the segment indices, so the readers
 * may find the rule in the indices before the rule is visible in the
 * subtable's 'rules' map.  This may result in us losing the
 * opportunity to quit lookups earlier, resulting in sub-optimal
 * wildcarding.  This will be fixed by the forthcoming revalidation that
 * is always scheduled after flow table changes.
 *
 * Similar behavior may happen due to us removing the overlapping rule
 * (if any) from the indices only after the new rule has been added.
 *
 * The subtable's max priority is updated only after the rule is
 * inserted, so the concurrent readers may not see the rule, as the
 * updated priority ordered subtable list will only be visible after
 * the subtable's max priority is updated.
 *
 * Similarly, the classifier's partitions for new rules are updated by
 * the caller after this function, so the readers may keep skipping
 * the subtable until they see the updated partitions.
 */
static struct cls_match *
insert_rule(struct classifier *cls, struct cls_subtable *subtable,
            struct cls_rule *new_rule)
    OVS_REQUIRES(cls->mutex)
{
    struct cls_match *old = NULL;
    struct cls_match *new = cls_match_alloc(new_rule);
    struct cls_match *head;
    int i;
    uint32_t basis = 0, hash, ihash[CLS_MAX_INDICES];
    uint8_t prev_be32ofs = 0;

    /* Add new node to segment indices. */
    for (i = 0; i < subtable->n_indices; i++) {
        ihash[i] = minimatch_hash_range(&new_rule->match, prev_be32ofs,
                                        subtable->index_ofs[i], &basis);
        cmap_insert(&subtable->indices[i], &new->index_nodes[i], ihash[i]);
        prev_be32ofs = subtable->index_ofs[i];
    }
    hash = minimatch_hash_range(&new_rule->match, prev_be32ofs, FLOW_U32S,
                                &basis);
    head = find_equal(subtable, &new_rule->match.flow, hash);
    if (!head) {
        cmap_insert(&subtable->rules, &new->cmap_node, hash);
        list_init(&new->list);
        goto out;
    } else {
        /* Scan the list for the insertion point that will keep the list in
         * order of decreasing priority. */
        struct cls_match *rule;

        FOR_EACH_RULE_IN_LIST (rule, head) {
            if (new->priority >= rule->priority) {
                if (rule == head) {
                    /* 'new' is the new highest-priority flow in the list. */
                    cmap_replace(&subtable->rules, &rule->cmap_node,
                                 &new->cmap_node, hash);
                }

                if (new->priority == rule->priority) {
                    list_replace(&new->list, &rule->list);
                    old = rule;
                } else {
                    list_insert(&rule->list, &new->list);
                }
                goto out;
            }
        }

        /* Insert 'new' at the end of the list. */
        list_push_back(&head->list, &new->list);
    }

 out:
    if (!old) {
        subtable->n_rules++;

        /* Rule was added, not replaced.  Update the subtable's 'max_priority'
         * and 'max_count', if necessary. */
        if (subtable->n_rules == 1) {
            subtable->max_priority = new->priority;
            subtable->max_count = 1;
            pvector_insert(&cls->subtables, subtable, new->priority);
        } else if (subtable->max_priority == new->priority) {
            ++subtable->max_count;
        } else if (new->priority > subtable->max_priority) {
            subtable->max_priority = new->priority;
            subtable->max_count = 1;
            pvector_change_priority(&cls->subtables, subtable, new->priority);
        }
    } else {
        /* Remove old node from indices. */
        for (i = 0; i < subtable->n_indices; i++) {
            cmap_remove(&subtable->indices[i], &old->index_nodes[i], ihash[i]);
        }
    }
    return old;
}

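/* Illustrative sketch (editorial addition): after inserting rules with
 * priorities 7, 6, and 5 for the same flow/mask, the subtable's 'rules' cmap
 * contains only the head, and the head's 'list' links the rules in order of
 * decreasing priority:
 *
 *     cmap bucket -> [prio 7 (head)] <-> [prio 6] <-> [prio 5]
 *
 * Inserting another priority-6 rule replaces the existing one (returned as
 * 'old'), while inserting a priority-8 rule becomes the new head via
 * cmap_replace().  Readers thus always reach the highest-priority rule for a
 * given flow first. */
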
static struct cls_match *
next_rule_in_list__(struct cls_match *rule)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct cls_match *next = NULL;
    next = OBJECT_CONTAINING(rule->list.next, next, list);
    return next;
}

static struct cls_match *
next_rule_in_list(struct cls_match *rule)
{
    struct cls_match *next = next_rule_in_list__(rule);
    return next->priority < rule->priority ? next : NULL;
}
\f
/* A longest-prefix match tree. */
struct trie_node {
    uint32_t prefix;       /* Prefix bits for this node, MSB first. */
    uint8_t  n_bits;       /* Never zero, except for the root node. */
    unsigned int n_rules;  /* Number of rules that have this prefix. */
    rcu_trie_ptr edges[2]; /* Both NULL if leaf. */
};

/* Max bits per node.  Must fit in struct trie_node's 'prefix'.
 * Also tested with 16, 8, and 5 to stress the implementation. */
#define TRIE_PREFIX_BITS 32

/* Return at least 'plen' bits of the 'prefix', starting at bit offset 'ofs'.
 * Prefixes are in the network byte order, and the offset 0 corresponds to
 * the most significant bit of the first byte.  The offset can be read as
 * "how many bits to skip from the start of the prefix starting at 'pr'". */
static uint32_t
raw_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    uint32_t prefix;

    pr += ofs / 32; /* Where to start. */
    ofs %= 32; /* How many bits to skip at 'pr'. */

    prefix = ntohl(*pr) << ofs; /* Get the first 32 - ofs bits. */
    if (plen > 32 - ofs) { /* Need more than we have already? */
        prefix |= ntohl(*++pr) >> (32 - ofs);
    }
    /* Return with possible unwanted bits at the end. */
    return prefix;
}

/* Return min(TRIE_PREFIX_BITS, plen) bits of the 'prefix', starting at bit
 * offset 'ofs'.  Prefixes are in the network byte order, and the offset 0
 * corresponds to the most significant bit of the first byte.  The offset can
 * be read as "how many bits to skip from the start of the prefix starting at
 * 'pr'". */
static uint32_t
trie_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    if (!plen) {
        return 0;
    }
    if (plen > TRIE_PREFIX_BITS) {
        plen = TRIE_PREFIX_BITS; /* Get at most TRIE_PREFIX_BITS. */
    }
    /* Return with unwanted bits cleared. */
    return raw_get_prefix(pr, ofs, plen) & ~0u << (32 - plen);
}

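/* Worked example (editorial addition): with pr[] = { htonl(0x12345678),
 * htonl(0x9abcdef0) }, asking for 8 bits at offset 28 spans a word boundary:
 *
 *     raw_get_prefix(pr, 28, 8)  == 0x89abcdef  (low 24 bits are residue)
 *     trie_get_prefix(pr, 28, 8) == 0x89000000  (residue cleared)
 *
 * The 8 requested bits are the last 4 bits of the first word (0x8) followed
 * by the first 4 bits of the second word (0x9), left-aligned in the result. */
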
/* Return the number of equal bits in 'n_bits' of 'prefix's MSBs and a 'value'
 * starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
prefix_equal_bits(uint32_t prefix, unsigned int n_bits, const ovs_be32 value[],
                  unsigned int ofs)
{
    uint64_t diff = prefix ^ raw_get_prefix(value, ofs, n_bits);
    /* Set the bit after the relevant bits to limit the result. */
    return raw_clz64(diff << 32 | UINT64_C(1) << (63 - n_bits));
}

/* Return the number of equal bits in 'node' prefix and a 'prefix' of length
 * 'plen', starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
trie_prefix_equal_bits(const struct trie_node *node, const ovs_be32 prefix[],
                       unsigned int ofs, unsigned int plen)
{
    return prefix_equal_bits(node->prefix, MIN(node->n_bits, plen - ofs),
                             prefix, ofs);
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' can
 * be greater than 31. */
static unsigned int
be_get_bit_at(const ovs_be32 value[], unsigned int ofs)
{
    return (((const uint8_t *)value)[ofs / 8] >> (7 - ofs % 8)) & 1u;
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' must
 * be between 0 and 31, inclusive. */
static unsigned int
get_bit_at(const uint32_t prefix, unsigned int ofs)
{
    return (prefix >> (31 - ofs)) & 1u;
}

/* Create a new branch. */
static struct trie_node *
trie_branch_create(const ovs_be32 *prefix, unsigned int ofs, unsigned int plen,
                   unsigned int n_rules)
{
    struct trie_node *node = xmalloc(sizeof *node);

    node->prefix = trie_get_prefix(prefix, ofs, plen);

    if (plen <= TRIE_PREFIX_BITS) {
        node->n_bits = plen;
        ovsrcu_set_hidden(&node->edges[0], NULL);
        ovsrcu_set_hidden(&node->edges[1], NULL);
        node->n_rules = n_rules;
    } else { /* Need intermediate nodes. */
        struct trie_node *subnode = trie_branch_create(prefix,
                                                       ofs + TRIE_PREFIX_BITS,
                                                       plen - TRIE_PREFIX_BITS,
                                                       n_rules);
        int bit = get_bit_at(subnode->prefix, 0);
        node->n_bits = TRIE_PREFIX_BITS;
        ovsrcu_set_hidden(&node->edges[bit], subnode);
        ovsrcu_set_hidden(&node->edges[!bit], NULL);
        node->n_rules = 0;
    }
    return node;
}

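/* Illustrative example (editorial addition): since a node stores at most
 * TRIE_PREFIX_BITS (32) prefix bits, creating a branch for a 48-bit IPv6
 * prefix chains two nodes: the first holds bits 0..31 with n_bits == 32 and
 * n_rules == 0, and its single child holds bits 32..47 with n_bits == 16 and
 * n_rules == 1.  Only the final node of a branch counts the rule. */
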
static void
trie_node_destroy(const struct trie_node *node)
{
    ovsrcu_postpone(free, CONST_CAST(struct trie_node *, node));
}

/* Copy a trie node for modification and postpone deletion of the old one. */
static struct trie_node *
trie_node_rcu_realloc(const struct trie_node *node)
{
    struct trie_node *new_node = xmalloc(sizeof *node);

    *new_node = *node;
    trie_node_destroy(node);

    return new_node;
}

/* May only be called while holding the classifier mutex. */
static void
trie_destroy(rcu_trie_ptr *trie)
{
    struct trie_node *node = ovsrcu_get_protected(struct trie_node *, trie);

    if (node) {
        ovsrcu_set_hidden(trie, NULL);
        trie_destroy(&node->edges[0]);
        trie_destroy(&node->edges[1]);
        trie_node_destroy(node);
    }
}

static bool
trie_is_leaf(const struct trie_node *trie)
{
    /* No children? */
    return !ovsrcu_get(struct trie_node *, &trie->edges[0])
        && !ovsrcu_get(struct trie_node *, &trie->edges[1]);
}

static void
mask_set_prefix_bits(struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;

    for (i = 0; i < n_bits / 32; i++) {
        mask[i] = OVS_BE32_MAX;
    }
    if (n_bits % 32) {
        mask[i] |= htonl(~0u << (32 - n_bits % 32));
    }
}

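/* Worked example (editorial addition): mask_set_prefix_bits(wc, be32ofs, 20)
 * writes no full words (20 / 32 == 0) and then ORs the partial word
 *
 *     htonl(~0u << (32 - 20 % 32)) == htonl(0xfffff000)
 *
 * into the first mask word, i.e. it unwildcards exactly the 20 most
 * significant bits of the field in network byte order. */
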
static bool
mask_prefix_bits_set(const struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;
    ovs_be32 zeroes = 0;

    for (i = 0; i < n_bits / 32; i++) {
        zeroes |= ~mask[i];
    }
    if (n_bits % 32) {
        zeroes |= ~mask[i] & htonl(~0u << (32 - n_bits % 32));
    }

    return !zeroes; /* All 'n_bits' bits set. */
}

static rcu_trie_ptr *
trie_next_edge(struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return node->edges + be_get_bit_at(value, ofs);
}

static const struct trie_node *
trie_next_node(const struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return ovsrcu_get(struct trie_node *,
                      &node->edges[be_get_bit_at(value, ofs)]);
}

/* Set the bit at ("MSB 0"-based) offset 'ofs'.  'ofs' can be greater than
 * 31. */
static void
be_set_bit_at(ovs_be32 value[], unsigned int ofs)
{
    ((uint8_t *)value)[ofs / 8] |= 1u << (7 - ofs % 8);
}

/* Returns the number of bits in the prefix mask necessary to determine a
 * mismatch, in case there are longer prefixes in the tree below the one that
 * matched.
 * '*plens' will have a bit set for each prefix length that may have matching
 * rules.  The caller is responsible for clearing the '*plens' prior to
 * calling this. */
static unsigned int
trie_lookup_value(const rcu_trie_ptr *trie, const ovs_be32 value[],
                  ovs_be32 plens[], unsigned int n_bits)
{
    const struct trie_node *prev = NULL;
    const struct trie_node *node = ovsrcu_get(struct trie_node *, trie);
    unsigned int match_len = 0; /* Number of matching bits. */

    for (; node; prev = node, node = trie_next_node(node, value, match_len)) {
        unsigned int eqbits;
        /* Check if this edge can be followed. */
        eqbits = prefix_equal_bits(node->prefix, node->n_bits, value,
                                   match_len);
        match_len += eqbits;
        if (eqbits < node->n_bits) { /* Mismatch, nothing more to be found. */
            /* Bit at offset 'match_len' differed. */
            return match_len + 1; /* Includes the first mismatching bit. */
        }
        /* Full match, check if rules exist at this prefix length. */
        if (node->n_rules > 0) {
            be_set_bit_at(plens, match_len - 1);
        }
        if (match_len >= n_bits) {
            return n_bits; /* Full prefix. */
        }
    }
    /* node == NULL.  Full match so far, but we tried to follow a
     * non-existing branch.  Need to exclude the other branch if it exists
     * (it does not if we were called on an empty trie or 'prev' is a leaf
     * node). */
    return !prev || trie_is_leaf(prev) ? match_len : match_len + 1;
}

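/* Worked example (editorial addition): take a trie holding 10.0.0.0/8 and
 * 10.128.0.0/9, and a lookup of 10.64.3.7 with n_bits == 32.  The root
 * node's 8 prefix bits match fully and carry a rule, so bit 7 (the "/8" bit)
 * is set in 'plens'.  The next value bit (0) then selects a non-existing
 * branch, while the 10.128.0.0/9 node hangs off the other edge, so the
 * function returns 8 + 1 == 9: nine mask bits are enough to prove that no
 * longer prefix can match this value. */
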
static unsigned int
trie_lookup(const struct cls_trie *trie, const struct flow *flow,
            union mf_value *plens)
{
    const struct mf_field *mf = trie->field;

    /* Check that current flow matches the prerequisites for the trie
     * field.  Some match fields are used for multiple purposes, so we
     * must check that the trie is relevant for this flow. */
    if (mf_are_prereqs_ok(mf, flow)) {
        return trie_lookup_value(&trie->root,
                                 &((ovs_be32 *)flow)[mf->flow_be32ofs],
                                 &plens->be32, mf->n_bits);
    }
    memset(plens, 0xff, sizeof *plens); /* All prefixes, no skipping. */
    return 0; /* Value not used in this case. */
}

/* Returns the length of a prefix match mask for the field 'mf' in 'minimask'.
 * Returns 0 if the mask is all zeroes or is not a contiguous (CIDR) prefix
 * mask. */
static unsigned int
minimask_get_prefix_len(const struct minimask *minimask,
                        const struct mf_field *mf)
{
    unsigned int n_bits = 0, mask_tz = 0; /* Non-zero when end of mask seen. */
    uint8_t u32_ofs = mf->flow_be32ofs;
    uint8_t u32_end = u32_ofs + mf->n_bytes / 4;

    for (; u32_ofs < u32_end; ++u32_ofs) {
        uint32_t mask;
        mask = ntohl((OVS_FORCE ovs_be32)minimask_get(minimask, u32_ofs));

        /* Validate mask, count the mask length. */
        if (mask_tz) {
            if (mask) {
                return 0; /* No bits allowed after mask ended. */
            }
        } else {
            if (~mask & (~mask + 1)) {
                return 0; /* Mask not contiguous. */
            }
            mask_tz = ctz32(mask);
            n_bits += 32 - mask_tz;
        }
    }

    return n_bits;
}

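/* Worked example (editorial addition): the contiguity test relies on
 * '~mask + 1' carrying into the lowest set bit of 'mask':
 *
 *     mask == 0xffffff00: ~mask & (~mask + 1) == 0x000000ff & 0x00000100 == 0
 *         -> contiguous; mask_tz == 8, so n_bits == 32 - 8 == 24
 *     mask == 0xff00ff00: ~mask & (~mask + 1) == 0x00ff00ff & 0x00ff0100
 *         == 0x00ff0000 != 0 -> not a CIDR mask, return 0
 */
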
/*
 * This is called only when mask prefix is known to be CIDR and non-zero.
 * Relies on the fact that the flow and mask have the same map, and since
 * the mask is CIDR, the storage for the flow field exists even if it
 * happened to be zeros.
 */
static const ovs_be32 *
minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
{
    return miniflow_get_be32_values(&match->flow) +
        count_1bits(match->flow.map & ((UINT64_C(1) << mf->flow_be32ofs) - 1));
}

/* Insert rule into the prefix tree.
 * 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_insert_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

static void
trie_insert_prefix(rcu_trie_ptr *edge, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    int ofs = 0;

    /* Walk the tree. */
    for (; (node = ovsrcu_get_protected(struct trie_node *, edge));
         edge = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
        ofs += eqbits;
        if (eqbits < node->n_bits) {
            /* Mismatch, new node needs to be inserted above. */
            int old_branch = get_bit_at(node->prefix, eqbits);
            struct trie_node *new_parent;

            new_parent = trie_branch_create(prefix, ofs - eqbits, eqbits,
                                            ofs == mlen ? 1 : 0);
            /* Copy the node to modify it. */
            node = trie_node_rcu_realloc(node);
            /* Adjust the new node for its new position in the tree. */
            node->prefix <<= eqbits;
            node->n_bits -= eqbits;
            ovsrcu_set_hidden(&new_parent->edges[old_branch], node);

            /* Check if need a new branch for the new rule. */
            if (ofs < mlen) {
                ovsrcu_set_hidden(&new_parent->edges[!old_branch],
                                  trie_branch_create(prefix, ofs, mlen - ofs,
                                                     1));
            }
            ovsrcu_set(edge, new_parent); /* Publish changes. */
            return;
        }
        /* Full match so far. */

        if (ofs == mlen) {
            /* Full match at the current node, rule needs to be added here. */
            node->n_rules++;
            return;
        }
    }
    /* Must insert a new tree branch for the new rule. */
    ovsrcu_set(edge, trie_branch_create(prefix, ofs, mlen - ofs, 1));
}

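/* Illustrative example (editorial addition): inserting 10.64.0.0/10 into a
 * trie whose root holds the 9 prefix bits of 10.128.0.0/9 stops after
 * eqbits == 8 equal bits (the shared "10." octet).  A new 8-bit parent for
 * the common prefix is created, the old node is re-allocated with its prefix
 * shifted left by 8 and n_bits reduced to 1, and a new 2-bit branch ("01")
 * for the /10 is attached to the parent's other edge.  The parent is
 * published last, so concurrent readers see either the old or the new
 * subtree, never a partial one. */
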
/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_remove_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

/* 'mlen' must be the (non-zero) CIDR prefix length of the prefix being
 * removed. */
static void
trie_remove_prefix(rcu_trie_ptr *root, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    rcu_trie_ptr *edges[sizeof(union mf_value) * 8];
    int depth = 0, ofs = 0;

    /* Walk the tree. */
    for (edges[0] = root;
         (node = ovsrcu_get_protected(struct trie_node *, edges[depth]));
         edges[++depth] = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);

        if (eqbits < node->n_bits) {
            /* Mismatch, nothing to be removed.  This should never happen, as
             * only rules in the classifier are ever removed. */
            break; /* Log a warning. */
        }
        /* Full match so far. */
        ofs += eqbits;

        if (ofs == mlen) {
            /* Full prefix match at the current node, remove rule here. */
            if (!node->n_rules) {
                break; /* Log a warning. */
            }
            node->n_rules--;

            /* Check if can prune the tree. */
            while (!node->n_rules) {
                struct trie_node *next,
                    *edge0 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[0]),
                    *edge1 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[1]);

                if (edge0 && edge1) {
                    break; /* A branching point, cannot prune. */
                }

                /* Else have at most one child node, remove this node. */
                next = edge0 ? edge0 : edge1;

                if (next) {
                    if (node->n_bits + next->n_bits > TRIE_PREFIX_BITS) {
                        break;   /* Cannot combine. */
                    }
                    next = trie_node_rcu_realloc(next); /* Modify. */

                    /* Combine node with next. */
                    next->prefix = node->prefix | next->prefix >> node->n_bits;
                    next->n_bits += node->n_bits;
                }
                /* Update the parent's edge. */
                ovsrcu_set(edges[depth], next); /* Publish changes. */
                trie_node_destroy(node);

                if (next || !depth) {
                    /* Branch not pruned or at root, nothing more to do. */
                    break;
                }
                node = ovsrcu_get_protected(struct trie_node *,
                                            edges[--depth]);
            }
            return;
        }
    }
    /* Cannot go deeper.  This should never happen, since only rules
     * that actually exist in the classifier are ever removed. */
    VLOG_WARN("Trying to remove non-existing rule from a prefix trie.");
}
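
/* Illustrative example (editorial addition): when pruning leaves a node with
 * no rules and a single child, the two are merged by concatenating their
 * prefix bits.  For instance, a node with prefix 0xab000000 and n_bits == 8
 * above a child with prefix 0xcd000000 and n_bits == 8 becomes
 *
 *     next->prefix == 0xab000000 | (0xcd000000 >> 8) == 0xabcd0000
 *     next->n_bits == 16
 *
 * provided the combined length does not exceed TRIE_PREFIX_BITS. */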