/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "classifier.h"
#include "classifier-private.h"
#include <errno.h>
#include <netinet/in.h>
#include "byte-order.h"
#include "dynamic-string.h"
#include "odp-util.h"
#include "ofp-util.h"
#include "packets.h"
#include "util.h"
#include "openvswitch/vlog.h"

VLOG_DEFINE_THIS_MODULE(classifier);

struct trie_ctx;

/* A collection of "struct cls_conjunction"s currently embedded into a
 * cls_match. */
struct cls_conjunction_set {
    /* Link back to the cls_match.
     *
     * cls_conjunction_set is mostly used during classifier lookup, and, in
     * turn, during classifier lookup the most used member of
     * cls_conjunction_set is the rule's priority, so we cache it here for fast
     * access. */
    struct cls_match *match;
    int priority;               /* Cached copy of match->priority. */

    /* Conjunction information.
     *
     * 'min_n_clauses' allows some optimization during classifier lookup. */
    unsigned int n;             /* Number of elements in 'conj'. */
    unsigned int min_n_clauses; /* Smallest 'n' among elements of 'conj'. */
    struct cls_conjunction conj[];
};

/* Ports trie depends on both ports sharing the same ovs_be32. */
#define TP_PORTS_OFS32 (offsetof(struct flow, tp_src) / 4)
BUILD_ASSERT_DECL(TP_PORTS_OFS32 == offsetof(struct flow, tp_dst) / 4);
BUILD_ASSERT_DECL(TP_PORTS_OFS32 % 2 == 0);
#define TP_PORTS_OFS64 (TP_PORTS_OFS32 / 2)
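
/* Illustrative note (not part of the original sources): 'tp_src' and 'tp_dst'
 * are adjacent 16-bit members of struct flow, so together they occupy one
 * 32-bit word; TP_PORTS_OFS32 is that word's offset in ovs_be32 units.  A
 * minimal sketch of the layout the asserts above encode:
 *
 *     struct flow {
 *         ...
 *         ovs_be16 tp_src;    // byte offset 4 * TP_PORTS_OFS32
 *         ovs_be16 tp_dst;    // byte offset 4 * TP_PORTS_OFS32 + 2
 *         ...
 *     };
 *
 * The second assert additionally guarantees that this word starts a 64-bit
 * unit, so TP_PORTS_OFS64 can address the same data in uint64_t units. */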

static size_t
cls_conjunction_set_size(size_t n)
{
    return (sizeof(struct cls_conjunction_set)
            + n * sizeof(struct cls_conjunction));
}

static struct cls_conjunction_set *
cls_conjunction_set_alloc(struct cls_match *match,
                          const struct cls_conjunction conj[], size_t n)
{
    if (n) {
        size_t min_n_clauses = conj[0].n_clauses;
        for (size_t i = 1; i < n; i++) {
            min_n_clauses = MIN(min_n_clauses, conj[i].n_clauses);
        }

        struct cls_conjunction_set *set = xmalloc(cls_conjunction_set_size(n));
        set->match = match;
        set->priority = match->priority;
        set->n = n;
        set->min_n_clauses = min_n_clauses;
        memcpy(set->conj, conj, n * sizeof *conj);
        return set;
    } else {
        return NULL;
    }
}
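
/* Illustrative example (not part of the original sources): for a rule with
 * conjunctions conj[] = { {.id = 5, .clause = 0, .n_clauses = 2},
 *                         {.id = 6, .clause = 1, .n_clauses = 3} },
 * cls_conjunction_set_alloc() stores n = 2 and min_n_clauses = 2.  During
 * lookup, a whole set can then be skipped when fewer soft matches than
 * 'min_n_clauses' share the highest priority (see the early return in
 * find_conjunctive_match() below). */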

static struct cls_match *
cls_match_alloc(const struct cls_rule *rule,
                const struct cls_conjunction conj[], size_t n)
{
    int count = count_1bits(rule->match.flow.map);

    struct cls_match *cls_match
        = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
                  + MINIFLOW_VALUES_SIZE(count));

    ovsrcu_init(&cls_match->next, NULL);
    *CONST_CAST(const struct cls_rule **, &cls_match->cls_rule) = rule;
    *CONST_CAST(int *, &cls_match->priority) = rule->priority;
    atomic_init(&cls_match->visibility, 0);   /* Initially invisible. */
    miniflow_clone_inline(CONST_CAST(struct miniflow *, &cls_match->flow),
                          &rule->match.flow, count);
    ovsrcu_set_hidden(&cls_match->conj_set,
                      cls_conjunction_set_alloc(cls_match, conj, n));

    return cls_match;
}

static struct cls_subtable *find_subtable(const struct classifier *cls,
                                          const struct minimask *);
static struct cls_subtable *insert_subtable(struct classifier *cls,
                                            const struct minimask *);
static void destroy_subtable(struct classifier *cls, struct cls_subtable *);

static const struct cls_match *find_match_wc(const struct cls_subtable *,
                                             long long version,
                                             const struct flow *,
                                             struct trie_ctx *,
                                             unsigned int n_tries,
                                             struct flow_wildcards *);
static struct cls_match *find_equal(const struct cls_subtable *,
                                    const struct miniflow *, uint32_t hash);

/* Return the next visible (lower-priority) rule in the list.  Multiple
 * identical rules with the same priority may exist transitionally, but when
 * versioning is used at most one of them is ever visible for lookups on any
 * given 'version'. */
static inline const struct cls_match *
next_visible_rule_in_list(const struct cls_match *rule, long long version)
{
    do {
        rule = cls_match_next(rule);
        if (!rule) {
            /* We have reached the head of the list, stop. */
            break;
        }
    } while (!cls_match_visible_in_version(rule, version));

    return rule;
}
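
/* Illustrative note (a sketch inferred from how 'visibility' is used in this
 * file; the authoritative definition is cls_match_visible_in_version(),
 * presumably in classifier-private.h): a cls_match's atomic 'visibility'
 * encodes its visibility range in a single signed value:
 *
 *     visibility == 0   => never visible in any version
 *     visibility == +V  => visible in versions >= V
 *     visibility == -V  => invisible in versions >= V (removed at V)
 *
 * This is consistent with cls_match_alloc() initializing it to 0,
 * cls_rule_make_invisible_in_version() storing -version, and
 * cls_rule_restore_visibility() storing the rule's own positive version. */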

static unsigned int minimask_get_prefix_len(const struct minimask *,
                                            const struct mf_field *);
static void trie_init(struct classifier *cls, int trie_idx,
                      const struct mf_field *);
static unsigned int trie_lookup(const struct cls_trie *, const struct flow *,
                                union mf_value *plens);
static unsigned int trie_lookup_value(const rcu_trie_ptr *,
                                      const ovs_be32 value[], ovs_be32 plens[],
                                      unsigned int value_bits);
static void trie_destroy(rcu_trie_ptr *);
static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen);
static void trie_insert_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
                               int mlen);
static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen);
static void trie_remove_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
                               int mlen);
static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs,
                                 unsigned int n_bits);
static bool mask_prefix_bits_set(const struct flow_wildcards *,
                                 uint8_t be32ofs, unsigned int n_bits);
\f
/* cls_rule. */

static inline void
cls_rule_init__(struct cls_rule *rule, unsigned int priority,
                long long version)
{
    ovs_assert(version > 0);

    rculist_init(&rule->node);
    *CONST_CAST(int *, &rule->priority) = priority;
    *CONST_CAST(long long *, &rule->version) = version;
    rule->cls_match = NULL;
}

/* Initializes 'rule' to match packets specified by 'match' at the given
 * 'priority'.  'match' must satisfy the invariant described in the comment at
 * the definition of struct match.
 *
 * The caller must eventually destroy 'rule' with cls_rule_destroy().
 *
 * Clients should not use priority INT_MIN.  (OpenFlow uses priorities between
 * 0 and UINT16_MAX, inclusive.) */
void
cls_rule_init(struct cls_rule *rule, const struct match *match, int priority,
              long long version)
{
    cls_rule_init__(rule, priority, version);
    minimatch_init(CONST_CAST(struct minimatch *, &rule->match), match);
}
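
/* Usage sketch (illustrative, not part of the original file): a typical
 * caller builds a 'struct match', wraps it in a cls_rule, and eventually
 * destroys the rule again:
 *
 *     struct match match;
 *     struct cls_rule rule;
 *
 *     match_init_catchall(&match);
 *     match_set_dl_type(&match, htons(ETH_TYPE_IP));
 *     cls_rule_init(&rule, &match, 32768, version);
 *     ...
 *     cls_rule_destroy(&rule);
 *
 * match_init_catchall() and match_set_dl_type() come from match.h. */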

/* Same as cls_rule_init() for initialization from a "struct minimatch". */
void
cls_rule_init_from_minimatch(struct cls_rule *rule,
                             const struct minimatch *match, int priority,
                             long long version)
{
    cls_rule_init__(rule, priority, version);
    minimatch_clone(CONST_CAST(struct minimatch *, &rule->match), match);
}

/* Initializes 'dst' as a copy of 'src'.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_clone(struct cls_rule *dst, const struct cls_rule *src)
{
    cls_rule_init__(dst, src->priority, src->version);
    minimatch_clone(CONST_CAST(struct minimatch *, &dst->match), &src->match);
}

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 *
 * 'src' must be a cls_rule NOT in a classifier.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_move(struct cls_rule *dst, struct cls_rule *src)
{
    cls_rule_init__(dst, src->priority, src->version);
    minimatch_move(CONST_CAST(struct minimatch *, &dst->match),
                   CONST_CAST(struct minimatch *, &src->match));
}

/* Frees memory referenced by 'rule'.  Doesn't free 'rule' itself (it's
 * normally embedded into a larger structure).
 *
 * ('rule' must not currently be in a classifier.) */
void
cls_rule_destroy(struct cls_rule *rule)
{
    ovs_assert(!rule->cls_match);   /* Must not be in a classifier. */

    /* Check that the rule has been properly removed from the classifier and
     * that the destruction only happens after the RCU grace period, or that
     * the rule was never inserted to the classifier in the first place. */
    ovs_assert(rculist_next_protected(&rule->node) == RCULIST_POISON
               || rculist_is_empty(&rule->node));

    minimatch_destroy(CONST_CAST(struct minimatch *, &rule->match));
}

void
cls_rule_set_conjunctions(struct cls_rule *cr,
                          const struct cls_conjunction *conj, size_t n)
{
    struct cls_match *match = cr->cls_match;
    struct cls_conjunction_set *old
        = ovsrcu_get_protected(struct cls_conjunction_set *, &match->conj_set);
    struct cls_conjunction *old_conj = old ? old->conj : NULL;
    unsigned int old_n = old ? old->n : 0;

    if (old_n != n || (n && memcmp(old_conj, conj, n * sizeof *conj))) {
        if (old) {
            ovsrcu_postpone(free, old);
        }
        ovsrcu_set(&match->conj_set,
                   cls_conjunction_set_alloc(match, conj, n));
    }
}

/* Returns true if 'a' and 'b' match the same packets at the same priority,
 * false if they differ in some way. */
bool
cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b)
{
    return a->priority == b->priority && minimatch_equal(&a->match, &b->match);
}

/* Returns a hash value for 'rule', folding in 'basis'. */
uint32_t
cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
{
    return minimatch_hash(&rule->match, hash_int(rule->priority, basis));
}

/* Appends a string describing 'rule' to 's'. */
void
cls_rule_format(const struct cls_rule *rule, struct ds *s)
{
    minimatch_format(&rule->match, s, rule->priority);
}

/* Returns true if 'rule' matches every packet, false otherwise. */
bool
cls_rule_is_catchall(const struct cls_rule *rule)
{
    return minimask_is_catchall(&rule->match.mask);
}

/* Makes 'rule_' invisible after 'version'.  Once that version is made
 * invisible (by changing the version parameter used in lookups), the rule
 * should be actually removed via ovsrcu_postpone().
 *
 * 'rule_' must be in a classifier. */
void
cls_rule_make_invisible_in_version(const struct cls_rule *rule_,
                                   long long version, long long lookup_version)
{
    struct cls_match *rule = rule_->cls_match;

    /* XXX: Adjust when versioning is actually used. */
    ovs_assert(version >= rule_->version && version >= lookup_version);

    /* Normally, we call this when deleting a rule that is already visible to
     * lookups.  However, sometimes a bundle transaction will add a rule and
     * then delete it before the rule has ever become visible.  If we set such
     * a rule to become invisible in a future 'version', it would become
     * visible to all prior versions.  So, in this case we must set the rule
     * visibility to 0 (== never visible). */
    if (cls_match_visible_in_version(rule, lookup_version)) {
        /* Make invisible starting at 'version'. */
        atomic_store_relaxed(&rule->visibility, -version);
    } else {
        /* Rule has not yet been visible to lookups, make invisible in all
         * versions. */
        atomic_store_relaxed(&rule->visibility, 0);
    }
}

/* This undoes the change made by cls_rule_make_invisible_in_version().
 *
 * 'rule' must be in a classifier. */
void
cls_rule_restore_visibility(const struct cls_rule *rule)
{
    atomic_store_relaxed(&rule->cls_match->visibility, rule->version);
}

/* Return true if 'rule' is visible in 'version'.
 *
 * 'rule' must be in a classifier. */
bool
cls_rule_visible_in_version(const struct cls_rule *rule, long long version)
{
    return cls_match_visible_in_version(rule->cls_match, version);
}
\f
/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
void
classifier_init(struct classifier *cls, const uint8_t *flow_segments)
{
    cls->n_rules = 0;
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
    cmap_init(&cls->partitions);
    cls->n_flow_segments = 0;
    if (flow_segments) {
        while (cls->n_flow_segments < CLS_MAX_INDICES
               && *flow_segments < FLOW_U64S) {
            cls->flow_segments[cls->n_flow_segments++] = *flow_segments++;
        }
    }
    cls->n_tries = 0;
    for (int i = 0; i < CLS_MAX_TRIES; i++) {
        trie_init(cls, i, NULL);
    }
    cls->publish = true;
}

/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
void
classifier_destroy(struct classifier *cls)
{
    if (cls) {
        struct cls_partition *partition;
        struct cls_subtable *subtable;
        int i;

        for (i = 0; i < cls->n_tries; i++) {
            trie_destroy(&cls->tries[i].root);
        }

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);

        CMAP_FOR_EACH (partition, cmap_node, &cls->partitions) {
            ovsrcu_postpone(free, partition);
        }
        cmap_destroy(&cls->partitions);

        pvector_destroy(&cls->subtables);
    }
}

/* Sets the fields for which prefix lookup should be performed. */
bool
classifier_set_prefix_fields(struct classifier *cls,
                             const enum mf_field_id *trie_fields,
                             unsigned int n_fields)
{
    const struct mf_field *new_fields[CLS_MAX_TRIES];
    struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
    int i, n_tries = 0;
    bool changed = false;

    for (i = 0; i < n_fields && n_tries < CLS_MAX_TRIES; i++) {
        const struct mf_field *field = mf_from_id(trie_fields[i]);
        if (field->flow_be32ofs < 0 || field->n_bits % 32) {
            /* Incompatible field.  This is the only place where we
             * enforce these requirements, but the rest of the trie code
             * depends on the flow_be32ofs to be non-negative and the
             * field length to be a multiple of 32 bits. */
            continue;
        }

        if (bitmap_is_set(fields.bm, trie_fields[i])) {
            /* Duplicate field, there is no need to build more than
             * one index for any one field. */
            continue;
        }
        bitmap_set1(fields.bm, trie_fields[i]);

        new_fields[n_tries] = NULL;
        if (n_tries >= cls->n_tries || field != cls->tries[n_tries].field) {
            new_fields[n_tries] = field;
            changed = true;
        }
        n_tries++;
    }

    if (changed || n_tries < cls->n_tries) {
        struct cls_subtable *subtable;

        /* Trie configuration needs to change.  Disable trie lookups
         * for the tries that are changing and wait for all the current
         * readers with the old configuration to be done. */
        changed = false;
        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            for (i = 0; i < cls->n_tries; i++) {
                if ((i < n_tries && new_fields[i]) || i >= n_tries) {
                    if (subtable->trie_plen[i]) {
                        subtable->trie_plen[i] = 0;
                        changed = true;
                    }
                }
            }
        }
        /* Synchronize if any readers were using tries.  The readers may
         * temporarily function without the trie lookup based optimizations. */
        if (changed) {
            /* ovsrcu_synchronize() functions as a memory barrier, so it does
             * not matter that subtable->trie_plen is not atomic. */
            ovsrcu_synchronize();
        }

        /* Now set up the tries. */
        for (i = 0; i < n_tries; i++) {
            if (new_fields[i]) {
                trie_init(cls, i, new_fields[i]);
            }
        }
        /* Destroy the rest, if any. */
        for (; i < cls->n_tries; i++) {
            trie_init(cls, i, NULL);
        }

        cls->n_tries = n_tries;
        return true;
    }

    return false; /* No change. */
}
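
/* Usage sketch (illustrative, not part of the original file): a caller can
 * configure tries for the IPv4 source and destination fields roughly like
 * this:
 *
 *     static const enum mf_field_id trie_fields[2] = {
 *         MFF_IPV4_SRC, MFF_IPV4_DST
 *     };
 *
 *     classifier_set_prefix_fields(&cls, trie_fields,
 *                                  ARRAY_SIZE(trie_fields));
 *
 * Fields that are not 32-bit addressable or not a multiple of 32 bits wide
 * are silently skipped, as enforced above. */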

static void
trie_init(struct classifier *cls, int trie_idx, const struct mf_field *field)
{
    struct cls_trie *trie = &cls->tries[trie_idx];
    struct cls_subtable *subtable;

    if (trie_idx < cls->n_tries) {
        trie_destroy(&trie->root);
    } else {
        ovsrcu_set_hidden(&trie->root, NULL);
    }
    trie->field = field;

    /* Add existing rules to the new trie. */
    CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
        unsigned int plen;

        plen = field ? minimask_get_prefix_len(&subtable->mask, field) : 0;
        if (plen) {
            struct cls_match *head;

            CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
                trie_insert(trie, head->cls_rule, plen);
            }
        }
        /* Initialize subtable's prefix length on this field.  This will
         * allow readers to use the trie. */
        atomic_thread_fence(memory_order_release);
        subtable->trie_plen[trie_idx] = plen;
    }
}

/* Returns true if 'cls' contains no classification rules, false otherwise.
 * Checking the cmap requires no locking. */
bool
classifier_is_empty(const struct classifier *cls)
{
    return cmap_is_empty(&cls->subtables_map);
}

/* Returns the number of rules in 'cls'. */
int
classifier_count(const struct classifier *cls)
{
    /* n_rules is an int, so in the presence of concurrent writers this will
     * return either the old or a new value. */
    return cls->n_rules;
}

static uint32_t
hash_metadata(ovs_be64 metadata)
{
    return hash_uint64((OVS_FORCE uint64_t) metadata);
}

static struct cls_partition *
find_partition(const struct classifier *cls, ovs_be64 metadata, uint32_t hash)
{
    struct cls_partition *partition;

    CMAP_FOR_EACH_WITH_HASH (partition, cmap_node, hash, &cls->partitions) {
        if (partition->metadata == metadata) {
            return partition;
        }
    }

    return NULL;
}

static struct cls_partition *
create_partition(struct classifier *cls, struct cls_subtable *subtable,
                 ovs_be64 metadata)
{
    uint32_t hash = hash_metadata(metadata);
    struct cls_partition *partition = find_partition(cls, metadata, hash);
    if (!partition) {
        partition = xmalloc(sizeof *partition);
        partition->metadata = metadata;
        partition->tags = 0;
        tag_tracker_init(&partition->tracker);
        cmap_insert(&cls->partitions, &partition->cmap_node, hash);
    }
    tag_tracker_add(&partition->tracker, &partition->tags, subtable->tag);
    return partition;
}

static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
{
    /* Could optimize to use the same map if needed for fast path. */
    return MINIFLOW_GET_BE32(&match->flow, tp_src)
        & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
}

static void
subtable_replace_head_rule(struct classifier *cls OVS_UNUSED,
                           struct cls_subtable *subtable,
                           struct cls_match *head, struct cls_match *new,
                           uint32_t hash, uint32_t ihash[CLS_MAX_INDICES])
{
    /* Rule's data is already in the tries. */

    new->partition = head->partition; /* Steal partition, if any. */
    head->partition = NULL;

    for (int i = 0; i < subtable->n_indices; i++) {
        cmap_replace(&subtable->indices[i], &head->index_nodes[i],
                     &new->index_nodes[i], ihash[i]);
    }
    cmap_replace(&subtable->rules, &head->cmap_node, &new->cmap_node, hash);
}

/* Inserts 'rule' into 'cls'.  Until 'rule' is removed from 'cls', the caller
 * must not modify or free it.
 *
 * If 'cls' already contains an identical rule (including wildcards, values of
 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
 * rule that was replaced.  The caller takes ownership of the returned rule and
 * is thus responsible for destroying it with cls_rule_destroy(), after RCU
 * grace period has passed (see ovsrcu_postpone()).
 *
 * Returns NULL if 'cls' does not contain a rule with an identical key, after
 * inserting the new rule.  In this case, no rules are displaced by the new
 * rule, even rules that cannot have any effect because the new rule matches a
 * superset of their flows and has higher priority.
 */
const struct cls_rule *
classifier_replace(struct classifier *cls, const struct cls_rule *rule,
                   const struct cls_conjunction *conjs, size_t n_conjs)
{
    struct cls_match *new;
    struct cls_subtable *subtable;
    uint32_t ihash[CLS_MAX_INDICES];
    uint8_t prev_be64ofs = 0;
    struct cls_match *head;
    size_t n_rules = 0;
    uint32_t basis;
    uint32_t hash;
    int i;

    ovs_assert(rule->version > 0);

    /* 'new' is initially invisible to lookups. */
    new = cls_match_alloc(rule, conjs, n_conjs);

    CONST_CAST(struct cls_rule *, rule)->cls_match = new;

    subtable = find_subtable(cls, &rule->match.mask);
    if (!subtable) {
        subtable = insert_subtable(cls, &rule->match.mask);
    }

    /* Compute hashes in segments. */
    basis = 0;
    for (i = 0; i < subtable->n_indices; i++) {
        ihash[i] = minimatch_hash_range(&rule->match, prev_be64ofs,
                                        subtable->index_ofs[i], &basis);
        prev_be64ofs = subtable->index_ofs[i];
    }
    hash = minimatch_hash_range(&rule->match, prev_be64ofs, FLOW_U64S, &basis);

    head = find_equal(subtable, &rule->match.flow, hash);
    if (!head) {
        /* Add rule to tries.
         *
         * Concurrent readers might miss seeing the rule until this update,
         * which might require being fixed up by revalidation later. */
        for (i = 0; i < cls->n_tries; i++) {
            if (subtable->trie_plen[i]) {
                trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]);
            }
        }

        /* Add rule to ports trie. */
        if (subtable->ports_mask_len) {
            /* We mask the value to be inserted to always have the wildcarded
             * bits in known (zero) state, so we can include them in comparison
             * and they will always match (== their original value does not
             * matter). */
            ovs_be32 masked_ports = minimatch_get_ports(&rule->match);

            trie_insert_prefix(&subtable->ports_trie, &masked_ports,
                               subtable->ports_mask_len);
        }

        /* Add rule to partitions.
         *
         * Concurrent readers might miss seeing the rule until this update,
         * which might require being fixed up by revalidation later. */
        new->partition = NULL;
        if (minimask_get_metadata_mask(&rule->match.mask) == OVS_BE64_MAX) {
            ovs_be64 metadata = miniflow_get_metadata(&rule->match.flow);

            new->partition = create_partition(cls, subtable, metadata);
        }

        /* Add new node to segment indices.
         *
         * Readers may find the rule in the indices before the rule is visible
         * in the subtable's 'rules' map.  This may result in us losing the
         * opportunity to quit lookups earlier, resulting in sub-optimal
         * wildcarding.  This will be fixed later by revalidation (always
         * scheduled after flow table changes). */
        for (i = 0; i < subtable->n_indices; i++) {
            cmap_insert(&subtable->indices[i], &new->index_nodes[i], ihash[i]);
        }
        n_rules = cmap_insert(&subtable->rules, &new->cmap_node, hash);
    } else {   /* Equal rules exist in the classifier already. */
        struct cls_match *prev, *iter;

        /* Scan the list for the insertion point that will keep the list in
         * order of decreasing priority.  Insert after rules marked invisible
         * in any version of the same priority. */
        FOR_EACH_RULE_IN_LIST_PROTECTED (iter, prev, head) {
            if (rule->priority > iter->priority
                || (rule->priority == iter->priority
                    && !cls_match_is_eventually_invisible(iter))) {
                break;
            }
        }

        /* Replace 'iter' with 'new' or insert 'new' between 'prev' and
         * 'iter'. */
        if (iter) {
            struct cls_rule *old;

            if (rule->priority == iter->priority) {
                cls_match_replace(prev, iter, new);
                old = CONST_CAST(struct cls_rule *, iter->cls_rule);
            } else {
                cls_match_insert(prev, iter, new);
                old = NULL;
            }

            /* Replace the existing head in data structures, if rule is the new
             * head. */
            if (iter == head) {
                subtable_replace_head_rule(cls, subtable, head, new, hash,
                                           ihash);
            }

            if (old) {
                struct cls_conjunction_set *conj_set;

                conj_set = ovsrcu_get_protected(struct cls_conjunction_set *,
                                                &iter->conj_set);
                if (conj_set) {
                    ovsrcu_postpone(free, conj_set);
                }

                ovsrcu_postpone(cls_match_free_cb, iter);
                old->cls_match = NULL;

                /* No change in subtable's max priority or max count. */

                /* Make 'new' visible to lookups in the appropriate version. */
                cls_match_set_visibility(new, rule->version);

                /* Make rule visible to iterators (immediately). */
                rculist_replace(CONST_CAST(struct rculist *, &rule->node),
                                &old->node);

                /* Return displaced rule.  Caller is responsible for keeping it
                 * around until all threads quiesce. */
                return old;
            }
        } else {
            /* 'new' is the new node after 'prev'. */
            cls_match_insert(prev, iter, new);
        }
    }

    /* Make 'new' visible to lookups in the appropriate version. */
    cls_match_set_visibility(new, rule->version);

    /* Make rule visible to iterators (immediately). */
    rculist_push_back(&subtable->rules_list,
                      CONST_CAST(struct rculist *, &rule->node));

    /* Rule was added, not replaced.  Update 'subtable's 'max_priority' and
     * 'max_count', if necessary.
     *
     * The rule was already inserted, but concurrent readers may not see the
     * rule yet as the subtables vector is not updated yet.  This will have to
     * be fixed by revalidation later. */
    if (n_rules == 1) {
        subtable->max_priority = rule->priority;
        subtable->max_count = 1;
        pvector_insert(&cls->subtables, subtable, rule->priority);
    } else if (rule->priority == subtable->max_priority) {
        ++subtable->max_count;
    } else if (rule->priority > subtable->max_priority) {
        subtable->max_priority = rule->priority;
        subtable->max_count = 1;
        pvector_change_priority(&cls->subtables, subtable, rule->priority);
    }

    /* Nothing was replaced. */
    cls->n_rules++;

    if (cls->publish) {
        pvector_publish(&cls->subtables);
    }

    return NULL;
}

/* Inserts 'rule' into 'cls'.  Until 'rule' is removed from 'cls', the caller
 * must not modify or free it.
 *
 * 'cls' must not contain an identical rule (including wildcards, values of
 * fixed fields, and priority).  Use classifier_find_rule_exactly() to find
 * such a rule. */
void
classifier_insert(struct classifier *cls, const struct cls_rule *rule,
                  const struct cls_conjunction conj[], size_t n_conj)
{
    const struct cls_rule *displaced_rule
        = classifier_replace(cls, rule, conj, n_conj);
    ovs_assert(!displaced_rule);
}
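
/* Illustrative contrast (not part of the original sources):
 * classifier_insert() is classifier_replace() plus an assertion, so it is
 * appropriate when the caller has already ruled out duplicates, e.g.:
 *
 *     if (!classifier_find_rule_exactly(&cls, &rule)) {
 *         classifier_insert(&cls, &rule, NULL, 0);
 *     }
 *
 * whereas classifier_replace() both inserts and hands back any displaced
 * rule for RCU-deferred destruction. */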

/* Removes 'rule' from 'cls'.  It is the caller's responsibility to destroy
 * 'rule' with cls_rule_destroy(), freeing the memory block in which 'rule'
 * resides, etc., as necessary.
 *
 * Does nothing if 'rule' has been already removed, or was never inserted.
 *
 * Returns the removed rule, or NULL, if it was already removed.
 */
const struct cls_rule *
classifier_remove(struct classifier *cls, const struct cls_rule *cls_rule)
{
    struct cls_match *rule, *prev, *next, *head;
    struct cls_partition *partition;
    struct cls_conjunction_set *conj_set;
    struct cls_subtable *subtable;
    int i;
    uint32_t basis = 0, hash, ihash[CLS_MAX_INDICES];
    uint8_t prev_be64ofs = 0;
    size_t n_rules;

    rule = cls_rule->cls_match;
    if (!rule) {
        return NULL;
    }
    /* Mark as removed. */
    CONST_CAST(struct cls_rule *, cls_rule)->cls_match = NULL;

    /* Remove 'cls_rule' from the subtable's rules list. */
    rculist_remove(CONST_CAST(struct rculist *, &cls_rule->node));

    subtable = find_subtable(cls, &cls_rule->match.mask);
    ovs_assert(subtable);

    for (i = 0; i < subtable->n_indices; i++) {
        ihash[i] = minimatch_hash_range(&cls_rule->match, prev_be64ofs,
                                        subtable->index_ofs[i], &basis);
        prev_be64ofs = subtable->index_ofs[i];
    }
    hash = minimatch_hash_range(&cls_rule->match, prev_be64ofs, FLOW_U64S,
                                &basis);

    head = find_equal(subtable, &cls_rule->match.flow, hash);

    /* Check if the rule is not the head rule. */
    if (rule != head) {
        struct cls_match *iter;

        /* Not the head rule, but potentially one with the same priority. */
        /* Remove from the list of equal rules. */
        FOR_EACH_RULE_IN_LIST_PROTECTED (iter, prev, head) {
            if (rule == iter) {
                break;
            }
        }
        ovs_assert(iter == rule);

        cls_match_remove(prev, rule);

        goto check_priority;
    }

    /* 'rule' is the head rule.  Check if there is another rule to
     * replace 'rule' in the data structures. */
    next = cls_match_next_protected(rule);
    if (next) {
        subtable_replace_head_rule(cls, subtable, rule, next, hash, ihash);
        goto check_priority;
    }

    /* 'rule' is last of the kind in the classifier, must remove from all the
     * data structures. */

    if (subtable->ports_mask_len) {
        ovs_be32 masked_ports = minimatch_get_ports(&cls_rule->match);

        trie_remove_prefix(&subtable->ports_trie,
                           &masked_ports, subtable->ports_mask_len);
    }
    for (i = 0; i < cls->n_tries; i++) {
        if (subtable->trie_plen[i]) {
            trie_remove(&cls->tries[i], cls_rule, subtable->trie_plen[i]);
        }
    }

    /* Remove rule node from indices. */
    for (i = 0; i < subtable->n_indices; i++) {
        cmap_remove(&subtable->indices[i], &rule->index_nodes[i], ihash[i]);
    }
    n_rules = cmap_remove(&subtable->rules, &rule->cmap_node, hash);

    partition = rule->partition;
    if (partition) {
        tag_tracker_subtract(&partition->tracker, &partition->tags,
                             subtable->tag);
        if (!partition->tags) {
            cmap_remove(&cls->partitions, &partition->cmap_node,
                        hash_metadata(partition->metadata));
            ovsrcu_postpone(free, partition);
        }
    }

    if (n_rules == 0) {
        destroy_subtable(cls, subtable);
    } else {
check_priority:
        if (subtable->max_priority == rule->priority
            && --subtable->max_count == 0) {
            /* Find the new 'max_priority' and 'max_count'. */
            int max_priority = INT_MIN;
            struct cls_match *head;

            CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
                if (head->priority > max_priority) {
                    max_priority = head->priority;
                    subtable->max_count = 1;
                } else if (head->priority == max_priority) {
                    ++subtable->max_count;
                }
            }
            subtable->max_priority = max_priority;
            pvector_change_priority(&cls->subtables, subtable, max_priority);
        }
    }

    if (cls->publish) {
        pvector_publish(&cls->subtables);
    }

    /* Free the rule. */
    conj_set = ovsrcu_get_protected(struct cls_conjunction_set *,
                                    &rule->conj_set);
    if (conj_set) {
        ovsrcu_postpone(free, conj_set);
    }
    ovsrcu_postpone(cls_match_free_cb, rule);
    cls->n_rules--;

    return cls_rule;
}
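
/* Usage sketch (illustrative, not part of the original file): since readers
 * may still be traversing a removed rule, the caller typically defers its
 * destruction past the RCU grace period, along the lines of:
 *
 *     if (classifier_remove(&cls, rule)) {
 *         ovsrcu_postpone(free_rule_cb, rule);
 *     }
 *
 * where free_rule_cb() is a caller-supplied callback (hypothetical name)
 * that calls cls_rule_destroy() and frees the enclosing structure. */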

/* Prefix tree context.  Valid when 'lookup_done' is true.  Can skip all
 * subtables which have a prefix match on the trie field, but whose prefix
 * length is not indicated in 'match_plens'.  For example, a subtable that
 * has an 8-bit trie field prefix match can be skipped if
 * !be_get_bit_at(&match_plens, 8 - 1).  If skipped, 'maskbits' prefix bits
 * must be unwildcarded to make the datapath flow only match packets it
 * should. */
struct trie_ctx {
    const struct cls_trie *trie;
    bool lookup_done;           /* Status of the lookup. */
    uint8_t be32ofs;            /* U32 offset of the field in question. */
    unsigned int maskbits;      /* Prefix length needed to avoid false
                                 * matches. */
    union mf_value match_plens; /* Bitmask of prefix lengths with possible
                                 * matches. */
};

static void
trie_ctx_init(struct trie_ctx *ctx, const struct cls_trie *trie)
{
    ctx->trie = trie;
    ctx->be32ofs = trie->field->flow_be32ofs;
    ctx->lookup_done = false;
}

struct conjunctive_match {
    struct hmap_node hmap_node;
    uint32_t id;
    uint64_t clauses;
};

static struct conjunctive_match *
find_conjunctive_match__(struct hmap *matches, uint64_t id, uint32_t hash)
{
    struct conjunctive_match *m;

    HMAP_FOR_EACH_IN_BUCKET (m, hmap_node, hash, matches) {
        if (m->id == id) {
            return m;
        }
    }
    return NULL;
}

static bool
find_conjunctive_match(const struct cls_conjunction_set *set,
                       unsigned int max_n_clauses, struct hmap *matches,
                       struct conjunctive_match *cm_stubs, size_t n_cm_stubs,
                       uint32_t *idp)
{
    const struct cls_conjunction *c;

    if (max_n_clauses < set->min_n_clauses) {
        return false;
    }

    for (c = set->conj; c < &set->conj[set->n]; c++) {
        struct conjunctive_match *cm;
        uint32_t hash;

        if (c->n_clauses > max_n_clauses) {
            continue;
        }

        hash = hash_int(c->id, 0);
        cm = find_conjunctive_match__(matches, c->id, hash);
        if (!cm) {
            size_t n = hmap_count(matches);

            cm = n < n_cm_stubs ? &cm_stubs[n] : xmalloc(sizeof *cm);
            hmap_insert(matches, &cm->hmap_node, hash);
            cm->id = c->id;
            cm->clauses = UINT64_MAX << (c->n_clauses & 63);
        }
        cm->clauses |= UINT64_C(1) << c->clause;
        if (cm->clauses == UINT64_MAX) {
            *idp = cm->id;
            return true;
        }
    }
    return false;
}
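
/* Worked example (illustrative, not part of the original file): in
 * find_conjunctive_match(), 'clauses' is a 64-bit completion bitmap.  For a
 * conjunction with n_clauses == 3 it starts as
 *
 *     UINT64_MAX << 3 == 0xfffffffffffffff8
 *
 * i.e., only bits 0..2 (one per clause) are clear.  Each soft match for
 * clause k then sets bit k via
 *
 *     cm->clauses |= UINT64_C(1) << c->clause;
 *
 * so the conjunction is fully satisfied exactly when all 64 bits are set
 * (cm->clauses == UINT64_MAX). */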

static void
free_conjunctive_matches(struct hmap *matches,
                         struct conjunctive_match *cm_stubs, size_t n_cm_stubs)
{
    if (hmap_count(matches) > n_cm_stubs) {
        struct conjunctive_match *cm, *next;

        HMAP_FOR_EACH_SAFE (cm, next, hmap_node, matches) {
            if (!(cm >= cm_stubs && cm < &cm_stubs[n_cm_stubs])) {
                free(cm);
            }
        }
    }
    hmap_destroy(matches);
}

/* Like classifier_lookup(), except that support for conjunctive matches can be
 * configured with 'allow_conjunctive_matches'.  That feature is not exposed
 * externally because turning off conjunctive matches is only useful to avoid
 * recursion within this function itself.
 *
 * 'flow' is non-const to allow for temporary modifications during the lookup.
 * Any changes are restored before returning. */
static const struct cls_rule *
classifier_lookup__(const struct classifier *cls, long long version,
                    struct flow *flow, struct flow_wildcards *wc,
                    bool allow_conjunctive_matches)
{
    const struct cls_partition *partition;
    struct trie_ctx trie_ctx[CLS_MAX_TRIES];
    const struct cls_match *match;
    tag_type tags;

    /* Highest-priority flow in 'cls' that certainly matches 'flow'. */
    const struct cls_match *hard = NULL;
    int hard_pri = INT_MIN;     /* hard ? hard->priority : INT_MIN. */

    /* Highest-priority conjunctive flows in 'cls' matching 'flow'.  Since
     * these are (components of) conjunctive flows, we can only know whether
     * the full conjunctive flow matches after seeing multiple of them.  Thus,
     * we refer to these as "soft matches". */
    struct cls_conjunction_set *soft_stub[64];
    struct cls_conjunction_set **soft = soft_stub;
    size_t n_soft = 0, allocated_soft = ARRAY_SIZE(soft_stub);
    int soft_pri = INT_MIN;     /* n_soft ? MAX(soft[*]->priority) : INT_MIN. */

    /* Synchronize for cls->n_tries and subtable->trie_plen.  They can change
     * when table configuration changes, which happens typically only on
     * startup. */
    atomic_thread_fence(memory_order_acquire);

    /* Determine 'tags' such that, if 'subtable->tag' doesn't intersect them,
     * then 'flow' cannot possibly match in 'subtable':
     *
     *     - If flow->metadata maps to a given 'partition', then we can use
     *       'tags' for 'partition->tags'.
     *
     *     - If flow->metadata has no partition, then no rule in 'cls' has an
     *       exact-match for flow->metadata.  That means that we don't need to
     *       search any subtable that includes flow->metadata in its mask.
     *
     * In either case, we always need to search any cls_subtables that do not
     * include flow->metadata in its mask.  One way to do that would be to
     * check the "cls_subtable"s explicitly for that, but that would require an
     * extra branch per subtable.  Instead, we mark such a cls_subtable's
     * 'tags' as TAG_ALL and make sure that 'tags' is never empty.  This means
     * that 'tags' always intersects such a cls_subtable's 'tags', so we don't
     * need a special case.
     */
    partition = (cmap_is_empty(&cls->partitions)
                 ? NULL
                 : find_partition(cls, flow->metadata,
                                  hash_metadata(flow->metadata)));
    tags = partition ? partition->tags : TAG_ARBITRARY;

    /* Initialize trie contexts for find_match_wc(). */
    for (int i = 0; i < cls->n_tries; i++) {
        trie_ctx_init(&trie_ctx[i], &cls->tries[i]);
    }

    /* Main loop. */
    struct cls_subtable *subtable;
    PVECTOR_FOR_EACH_PRIORITY (subtable, hard_pri, 2, sizeof *subtable,
                               &cls->subtables) {
        struct cls_conjunction_set *conj_set;

        /* Skip subtables not in our partition. */
        if (!tag_intersects(tags, subtable->tag)) {
            continue;
        }

        /* Skip subtables with no match, or where the match is lower-priority
         * than some certain match we've already found. */
        match = find_match_wc(subtable, version, flow, trie_ctx, cls->n_tries,
                              wc);
        if (!match || match->priority <= hard_pri) {
            continue;
        }

        conj_set = ovsrcu_get(struct cls_conjunction_set *, &match->conj_set);
        if (!conj_set) {
            /* 'match' isn't part of a conjunctive match.  It's the best
             * certain match we've got so far, since we know that it's
             * higher-priority than hard_pri.
             *
             * (There might be a higher-priority conjunctive match.  We can't
             * tell yet.) */
            hard = match;
            hard_pri = hard->priority;
        } else if (allow_conjunctive_matches) {
            /* 'match' is part of a conjunctive match.  Add it to the list. */
            if (OVS_UNLIKELY(n_soft >= allocated_soft)) {
                struct cls_conjunction_set **old_soft = soft;

                allocated_soft *= 2;
                soft = xmalloc(allocated_soft * sizeof *soft);
                memcpy(soft, old_soft, n_soft * sizeof *soft);
                if (old_soft != soft_stub) {
                    free(old_soft);
                }
            }
            soft[n_soft++] = conj_set;

            /* Keep track of the highest-priority soft match. */
            if (soft_pri < match->priority) {
                soft_pri = match->priority;
            }
        }
    }

    /* In the common case, at this point we have no soft matches and we can
     * return immediately.  (We do the same thing if we have potential soft
     * matches but none of them are higher-priority than our hard match.) */
    if (hard_pri >= soft_pri) {
        if (soft != soft_stub) {
            free(soft);
        }
        return hard ? hard->cls_rule : NULL;
    }

    /* At this point, we have some soft matches.  We might also have a hard
     * match; if so, its priority is lower than the highest-priority soft
     * match. */

    /* Soft match loop.
     *
     * Check whether soft matches are real matches. */
    for (;;) {
        /* Delete soft matches that are null.  This only happens in second and
         * subsequent iterations of the soft match loop, when we drop back from
         * a high-priority soft match to a lower-priority one.
         *
         * Also, delete soft matches whose priority is less than or equal to
         * the hard match's priority.  In the first iteration of the soft
         * match loop, these can be in 'soft' because the earlier main loop
         * found the soft match before the hard match.  In second and later
         * iterations of the soft match loop, these can be in 'soft' because
         * we dropped back from a high-priority soft match to a lower-priority
         * soft match.
         *
         * It is tempting to delete soft matches that cannot be satisfied
         * because there are fewer soft matches than required to satisfy any of
         * their conjunctions, but we cannot do that because there might be
         * lower priority soft or hard matches with otherwise identical
         * matches.  (We could special case those here, but there's no
         * need--we'll do so at the bottom of the soft match loop anyway and
         * this duplicates less code.)
         *
         * It's also tempting to break out of the soft match loop if 'n_soft ==
         * 1' but that would also miss lower-priority hard matches.  We could
         * special case that also but again there's no need. */
        for (int i = 0; i < n_soft; ) {
            if (!soft[i] || soft[i]->priority <= hard_pri) {
                soft[i] = soft[--n_soft];
            } else {
                i++;
            }
        }
        if (!n_soft) {
            break;
        }

        /* Find the highest priority among the soft matches.  (We know this
         * must be higher than the hard match's priority; otherwise we would
         * have deleted all of the soft matches in the previous loop.)  Count
         * the number of soft matches that have that priority. */
        soft_pri = INT_MIN;
        int n_soft_pri = 0;
        for (int i = 0; i < n_soft; i++) {
            if (soft[i]->priority > soft_pri) {
                soft_pri = soft[i]->priority;
                n_soft_pri = 1;
            } else if (soft[i]->priority == soft_pri) {
                n_soft_pri++;
            }
        }
        ovs_assert(soft_pri > hard_pri);

        /* Look for a real match among the highest-priority soft matches.
         *
         * It's unusual to have many conjunctive matches, so we use stubs to
         * avoid calling malloc() in the common case.  An hmap has a built-in
         * stub for up to 2 hmap_nodes; possibly, we would benefit from a
         * variant with a bigger stub. */
        struct conjunctive_match cm_stubs[16];
        struct hmap matches;

        hmap_init(&matches);
        for (int i = 0; i < n_soft; i++) {
            uint32_t id;

            if (soft[i]->priority == soft_pri
                && find_conjunctive_match(soft[i], n_soft_pri, &matches,
                                          cm_stubs, ARRAY_SIZE(cm_stubs),
                                          &id)) {
                uint32_t saved_conj_id = flow->conj_id;
                const struct cls_rule *rule;

                flow->conj_id = id;
                rule = classifier_lookup__(cls, version, flow, wc, false);
                flow->conj_id = saved_conj_id;

                if (rule) {
                    free_conjunctive_matches(&matches,
                                             cm_stubs, ARRAY_SIZE(cm_stubs));
                    if (soft != soft_stub) {
                        free(soft);
                    }
                    return rule;
                }
            }
        }
        free_conjunctive_matches(&matches, cm_stubs, ARRAY_SIZE(cm_stubs));

        /* There's no real match among the highest-priority soft matches.
         * However, if any of those soft matches has a lower-priority but
         * otherwise identical flow match, then we need to consider those for
         * soft or hard matches.
         *
         * The next iteration of the soft match loop will delete any null
         * pointers we put into 'soft' (and some others too). */
        for (int i = 0; i < n_soft; i++) {
            if (soft[i]->priority != soft_pri) {
                continue;
            }

            /* Find next-lower-priority flow with identical flow match. */
            match = next_visible_rule_in_list(soft[i]->match, version);
            if (match) {
                soft[i] = ovsrcu_get(struct cls_conjunction_set *,
                                     &match->conj_set);
                if (!soft[i]) {
                    /* The flow is a hard match; don't treat as a soft
                     * match. */
                    if (match->priority > hard_pri) {
                        hard = match;
                        hard_pri = hard->priority;
                    }
                }
            } else {
                /* No such lower-priority flow (probably the common case). */
                soft[i] = NULL;
            }
        }
    }

    if (soft != soft_stub) {
        free(soft);
    }
    return hard ? hard->cls_rule : NULL;
}

/* Finds and returns the highest-priority rule in 'cls' that matches 'flow' and
 * that is visible in 'version'.  Returns a null pointer if no rules in 'cls'
 * match 'flow'.  If multiple rules of equal priority match 'flow', returns one
 * arbitrarily.
 *
 * If a rule is found and 'wc' is non-null, bitwise-OR's 'wc' with the
 * set of bits that were significant in the lookup.  At some point
 * earlier, 'wc' should have been initialized (e.g., by
 * flow_wildcards_init_catchall()).
 *
 * 'flow' is non-const to allow for temporary modifications during the lookup.
 * Any changes are restored before returning. */
const struct cls_rule *
classifier_lookup(const struct classifier *cls, long long version,
                  struct flow *flow, struct flow_wildcards *wc)
{
    return classifier_lookup__(cls, version, flow, wc, true);
}
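
/* Usage sketch (illustrative, not part of the original file): a datapath-style
 * caller performs a lookup and collects the wildcards the lookup depended on:
 *
 *     struct flow_wildcards wc;
 *     const struct cls_rule *rule;
 *
 *     flow_wildcards_init_catchall(&wc);
 *     rule = classifier_lookup(&cls, version, &flow, &wc);
 *     if (rule) {
 *         // 'wc' now covers every bit the lookup examined, suitable for
 *         // installing a megaflow that matches the same way.
 *     }
 *
 * Passing a NULL 'wc' skips the wildcard tracking. */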

/* Finds and returns a rule in 'cls' with exactly the same priority and
 * matching criteria as 'target', and that is visible in 'target->version'.
 * Only one such rule may ever exist.  Returns a null pointer if 'cls' doesn't
 * contain an exact match. */
const struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
                             const struct cls_rule *target)
{
    const struct cls_match *head, *rule;
    const struct cls_subtable *subtable;

    subtable = find_subtable(cls, &target->match.mask);
    if (!subtable) {
        return NULL;
    }

    head = find_equal(subtable, &target->match.flow,
                      miniflow_hash_in_minimask(&target->match.flow,
                                                &target->match.mask, 0));
    if (!head) {
        return NULL;
    }
    CLS_MATCH_FOR_EACH (rule, head) {
        if (rule->priority < target->priority) {
            break; /* Not found. */
        }
        if (rule->priority == target->priority
            && cls_match_visible_in_version(rule, target->version)) {
            return rule->cls_rule;
        }
    }
    return NULL;
}

/* Finds and returns a rule in 'cls' with priority 'priority' and exactly the
 * same matching criteria as 'target', and that is visible in 'version'.
 * Returns a null pointer if 'cls' doesn't contain an exact match visible in
 * 'version'. */
const struct cls_rule *
classifier_find_match_exactly(const struct classifier *cls,
                              const struct match *target, int priority,
                              long long version)
{
    const struct cls_rule *retval;
    struct cls_rule cr;

    cls_rule_init(&cr, target, priority, version);
    retval = classifier_find_rule_exactly(cls, &cr);
    cls_rule_destroy(&cr);

    return retval;
}

/* Checks if 'target' would overlap any other rule in 'cls'.  Two rules are
 * considered to overlap if both rules have the same priority and a packet
 * could match both, and if both rules are visible in the same version.
 *
 * A trivial example of overlapping rules is two rules matching disjoint sets
 * of fields.  E.g., if one rule matches only on port number, while another
 * only on dl_type, any packet from that specific port and with that specific
 * dl_type could match both, if the rules also have the same priority. */
bool
classifier_rule_overlaps(const struct classifier *cls,
                         const struct cls_rule *target)
{
    struct cls_subtable *subtable;

    /* Iterate subtables in the descending max priority order. */
    PVECTOR_FOR_EACH_PRIORITY (subtable, target->priority - 1, 2,
                               sizeof(struct cls_subtable), &cls->subtables) {
        uint64_t storage[FLOW_U64S];
        struct minimask mask;
        const struct cls_rule *rule;

        minimask_combine(&mask, &target->match.mask, &subtable->mask, storage);

        RCULIST_FOR_EACH (rule, node, &subtable->rules_list) {
            if (rule->priority == target->priority
                && miniflow_equal_in_minimask(&target->match.flow,
                                              &rule->match.flow, &mask)
                && cls_match_visible_in_version(rule->cls_match,
                                                target->version)) {
                return true;
            }
        }
    }
    return false;
}

/* Returns true if 'rule' exactly matches 'criteria' or if 'rule' is more
 * specific than 'criteria'.  That is, 'rule' matches 'criteria' and this
 * function returns true if, for every field:
 *
 *   - 'criteria' and 'rule' specify the same (non-wildcarded) value for the
 *     field, or
 *
 *   - 'criteria' wildcards the field,
 *
 * Conversely, 'rule' does not match 'criteria' and this function returns false
 * if, for at least one field:
 *
 *   - 'criteria' and 'rule' specify different values for the field, or
 *
 *   - 'criteria' specifies a value for the field but 'rule' wildcards it.
 *
 * Equivalently, the truth table for whether a field matches is:
 *
 *                                     rule
 *
 *                   c         wildcard    exact
 *                   r        +---------+---------+
 *                   i   wild |   yes   |   yes   |
 *                   t   card |         |         |
 *                   e        +---------+---------+
 *                   r  exact |    no   |if values|
 *                   i        |         |are equal|
 *                   a        +---------+---------+
 *
 * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
 * commands and by OpenFlow 1.0 aggregate and flow stats.
 *
 * Ignores rule->priority. */
bool
cls_rule_is_loose_match(const struct cls_rule *rule,
                        const struct minimatch *criteria)
{
    return (!minimask_has_extra(&rule->match.mask, &criteria->mask)
            && miniflow_equal_in_minimask(&rule->match.flow, &criteria->flow,
                                          &criteria->mask));
}
b5d97350 1436\f
5ecc9d81
BP
1437/* Iteration. */
1438
2b7b1427
JR
1439/* Rule may only match a target if it is visible in target's version. For NULL
1440 * target we only return rules that are not invisible in any version. */
5ecc9d81 1441static bool
de4ad4a2 1442rule_matches(const struct cls_rule *rule, const struct cls_rule *target)
5ecc9d81 1443{
2b7b1427
JR
1444 /* Iterators never see duplicate rules with the same priority. */
1445 return target
1446 ? (miniflow_equal_in_minimask(&rule->match.flow, &target->match.flow,
1447 &target->match.mask)
1448 && cls_match_visible_in_version(rule->cls_match, target->version))
1449 : !cls_match_is_eventually_invisible(rule->cls_match);
5ecc9d81
BP
1450}
1451
de4ad4a2 1452static const struct cls_rule *
03868246 1453search_subtable(const struct cls_subtable *subtable,
f2c21402 1454 struct cls_cursor *cursor)
5ecc9d81 1455{
f2c21402
JR
1456 if (!cursor->target
1457 || !minimask_has_extra(&subtable->mask, &cursor->target->match.mask)) {
de4ad4a2 1458 const struct cls_rule *rule;
5ecc9d81 1459
de4ad4a2 1460 RCULIST_FOR_EACH (rule, node, &subtable->rules_list) {
f2c21402 1461 if (rule_matches(rule, cursor->target)) {
5ecc9d81
BP
1462 return rule;
1463 }
1464 }
1465 }
1466 return NULL;
1467}
1468
/* Initializes a cursor for iterating through rules in 'cls'.  The returned
 * cursor's 'rule' member points to the first matching cls_rule, or is NULL
 * if there are no matches.
 *
 *     - If 'target' is null, or if 'target' is a catchall target whose
 *       version is CLS_MAX_VERSION, the cursor will visit every rule in
 *       'cls' that is not invisible in any version.
 *
 *     - If 'target' is nonnull, the cursor will visit each 'rule' in 'cls'
 *       such that cls_rule_is_loose_match(rule, target) returns true and the
 *       rule is visible in 'target->version'.
 *
 * Ignores target->priority. */
struct cls_cursor
cls_cursor_start(const struct classifier *cls, const struct cls_rule *target)
{
    struct cls_cursor cursor;
    struct cls_subtable *subtable;

    cursor.cls = cls;
    cursor.target = target && (!cls_rule_is_catchall(target)
                               || target->version != CLS_MAX_VERSION)
                    ? target : NULL;
    cursor.rule = NULL;

    /* Find first rule. */
    PVECTOR_CURSOR_FOR_EACH (subtable, &cursor.subtables,
                             &cursor.cls->subtables) {
        const struct cls_rule *rule = search_subtable(subtable, &cursor);

        if (rule) {
            cursor.subtable = subtable;
            cursor.rule = rule;
            break;
        }
    }

    return cursor;
}

static const struct cls_rule *
cls_cursor_next(struct cls_cursor *cursor)
{
    const struct cls_rule *rule;
    const struct cls_subtable *subtable;

    rule = cursor->rule;
    subtable = cursor->subtable;
    RCULIST_FOR_EACH_CONTINUE (rule, node, &subtable->rules_list) {
        if (rule_matches(rule, cursor->target)) {
            return rule;
        }
    }

    PVECTOR_CURSOR_FOR_EACH_CONTINUE (subtable, &cursor->subtables) {
        rule = search_subtable(subtable, cursor);
        if (rule) {
            cursor->subtable = subtable;
            return rule;
        }
    }

    return NULL;
}

/* Sets 'cursor->rule' to the next matching cls_rule in 'cursor''s iteration,
 * or to null if all matching rules have been visited. */
void
cls_cursor_advance(struct cls_cursor *cursor)
{
    cursor->rule = cls_cursor_next(cursor);
}
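
/* A minimal iteration sketch (uses only the public cursor API above; the
 * CLS_FOR_EACH family of macros in classifier.h wraps the same pattern).
 * Counts the rules the cursor visits for 'target'; see cls_cursor_start()
 * for the exact visiting semantics. */
static inline size_t
count_rules_sketch(const struct classifier *cls,
                   const struct cls_rule *target)
{
    struct cls_cursor cursor = cls_cursor_start(cls, target);
    size_t n = 0;

    for (; cursor.rule; cls_cursor_advance(&cursor)) {
        n++;
    }
    return n;
}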
\f
static struct cls_subtable *
find_subtable(const struct classifier *cls, const struct minimask *mask)
{
    struct cls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, minimask_hash(mask, 0),
                             &cls->subtables_map) {
        if (minimask_equal(mask, &subtable->mask)) {
            return subtable;
        }
    }
    return NULL;
}

/* The new subtable will be visible to the readers only after this. */
static struct cls_subtable *
insert_subtable(struct classifier *cls, const struct minimask *mask)
{
    uint32_t hash = minimask_hash(mask, 0);
    struct cls_subtable *subtable;
    int i, index = 0;
    struct flow_wildcards old, new;
    uint8_t prev;
    int count = count_1bits(mask->masks.map);

    subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
                       + MINIFLOW_VALUES_SIZE(count));
    cmap_init(&subtable->rules);
    miniflow_clone_inline(CONST_CAST(struct miniflow *, &subtable->mask.masks),
                          &mask->masks, count);

    /* Init indices for segmented lookup, if any. */
    flow_wildcards_init_catchall(&new);
    old = new;
    prev = 0;
    for (i = 0; i < cls->n_flow_segments; i++) {
        flow_wildcards_fold_minimask_range(&new, mask, prev,
                                           cls->flow_segments[i]);
        /* Add an index if it adds mask bits. */
        if (!flow_wildcards_equal(&new, &old)) {
            cmap_init(&subtable->indices[index]);
            *CONST_CAST(uint8_t *, &subtable->index_ofs[index])
                = cls->flow_segments[i];
            index++;
            old = new;
        }
        prev = cls->flow_segments[i];
    }
    /* Check if the rest of the subtable's mask adds any bits,
     * and remove the last index if it doesn't. */
    if (index > 0) {
        flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U64S);
        if (flow_wildcards_equal(&new, &old)) {
            --index;
            *CONST_CAST(uint8_t *, &subtable->index_ofs[index]) = 0;
            cmap_destroy(&subtable->indices[index]);
        }
    }
    *CONST_CAST(uint8_t *, &subtable->n_indices) = index;

    *CONST_CAST(tag_type *, &subtable->tag) =
        (minimask_get_metadata_mask(mask) == OVS_BE64_MAX
         ? tag_create_deterministic(hash)
         : TAG_ALL);

    for (i = 0; i < cls->n_tries; i++) {
        subtable->trie_plen[i] = minimask_get_prefix_len(mask,
                                                         cls->tries[i].field);
    }

    /* Ports trie. */
    ovsrcu_set_hidden(&subtable->ports_trie, NULL);
    *CONST_CAST(int *, &subtable->ports_mask_len)
        = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));

    /* List of rules. */
    rculist_init(&subtable->rules_list);

    cmap_insert(&cls->subtables_map, &subtable->cmap_node, hash);

    return subtable;
}

/* RCU readers may still access the subtable before it is actually freed. */
static void
destroy_subtable(struct classifier *cls, struct cls_subtable *subtable)
{
    int i;

    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                minimask_hash(&subtable->mask, 0));

    ovs_assert(ovsrcu_get_protected(struct trie_node *, &subtable->ports_trie)
               == NULL);
    ovs_assert(cmap_is_empty(&subtable->rules));
    ovs_assert(rculist_is_empty(&subtable->rules_list));

    for (i = 0; i < subtable->n_indices; i++) {
        cmap_destroy(&subtable->indices[i]);
    }
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}

struct range {
    uint8_t start;
    uint8_t end;
};

static unsigned int be_get_bit_at(const ovs_be32 value[], unsigned int ofs);

/* Return 'true' if we can skip the rest of the subtable based on the prefix
 * trie lookup results. */
static inline bool
check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
            const unsigned int field_plen[CLS_MAX_TRIES],
            const struct range ofs, const struct flow *flow,
            struct flow_wildcards *wc)
{
    int j;

    /* Check if we could avoid fully unwildcarding the next level of
     * fields using the prefix tries.  The trie checks are done only as
     * needed to avoid folding in additional bits to the wildcards mask. */
    for (j = 0; j < n_tries; j++) {
        /* Is the trie field relevant for this subtable? */
        if (field_plen[j]) {
            struct trie_ctx *ctx = &trie_ctx[j];
            uint8_t be32ofs = ctx->be32ofs;
            uint8_t be64ofs = be32ofs / 2;

            /* Is the trie field within the current range of fields? */
            if (be64ofs >= ofs.start && be64ofs < ofs.end) {
                /* On-demand trie lookup. */
                if (!ctx->lookup_done) {
                    memset(&ctx->match_plens, 0, sizeof ctx->match_plens);
                    ctx->maskbits = trie_lookup(ctx->trie, flow,
                                                &ctx->match_plens);
                    ctx->lookup_done = true;
                }
                /* Possible to skip the rest of the subtable if subtable's
                 * prefix on the field is not included in the lookup result. */
                if (!be_get_bit_at(&ctx->match_plens.be32, field_plen[j] - 1)) {
                    /* We want the trie lookup to never result in unwildcarding
                     * any bits that would not be unwildcarded otherwise.
                     * Since the trie is shared by the whole classifier, it is
                     * possible that the 'maskbits' contain bits that are
                     * irrelevant for the partition relevant for the current
                     * packet.  Hence the checks below. */

                    /* Check that the trie result will not unwildcard more bits
                     * than this subtable would otherwise. */
                    if (ctx->maskbits <= field_plen[j]) {
                        /* Unwildcard the bits and skip the rest. */
                        mask_set_prefix_bits(wc, be32ofs, ctx->maskbits);
                        /* Note: Prerequisite already unwildcarded, as the only
                         * prerequisite of the supported trie lookup fields is
                         * the ethertype, which is always unwildcarded. */
                        return true;
                    }
                    /* Can skip if the field is already unwildcarded. */
                    if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) {
                        return true;
                    }
                }
            }
        }
    }
    return false;
}

/* Returns true if 'target' satisfies 'flow'/'mask', that is, if for each bit
 * in 'flow' for which 'mask' has a bit set, 'target' has the same value.
 *
 * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
 * target, mask) but is faster because of the invariant that 'flow->map' and
 * 'mask->masks.map' are the same, and because it does not take a 'wc' to
 * fill in. */
static inline bool
miniflow_and_mask_matches_flow(const struct miniflow *flow,
                               const struct minimask *mask,
                               const struct flow *target)
{
    const uint64_t *flowp = miniflow_get_values(flow);
    const uint64_t *maskp = miniflow_get_values(&mask->masks);
    int idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        uint64_t diff = (*flowp++ ^ flow_u64_value(target, idx)) & *maskp++;

        if (diff) {
            return false;
        }
    }

    return true;
}
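
/* The same test restated over plain arrays (a sketch, not used elsewhere):
 * compare only where the mask has bits set.  The miniflow version above
 * walks just the words present in the map instead of all 'n' words. */
static inline bool
masked_equal_sketch(const uint64_t *a, const uint64_t *b,
                    const uint64_t *mask, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        if ((a[i] ^ b[i]) & mask[i]) {
            return false;
        }
    }
    return true;
}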

static inline const struct cls_match *
find_match(const struct cls_subtable *subtable, long long version,
           const struct flow *flow, uint32_t hash)
{
    const struct cls_match *head, *rule;

    CMAP_FOR_EACH_WITH_HASH (head, cmap_node, hash, &subtable->rules) {
        if (OVS_LIKELY(miniflow_and_mask_matches_flow(&head->flow,
                                                      &subtable->mask,
                                                      flow))) {
            /* Return highest priority rule that is visible. */
            CLS_MATCH_FOR_EACH (rule, head) {
                if (OVS_LIKELY(cls_match_visible_in_version(rule, version))) {
                    return rule;
                }
            }
        }
    }

    return NULL;
}

/* Returns true if 'target' satisfies 'flow'/'mask', that is, if for each bit
 * in 'flow' for which 'mask' has a bit set, 'target' has the same value.
 *
 * This function is equivalent to miniflow_and_mask_matches_flow() but this
 * version fills in the mask bits in 'wc'. */
static inline bool
miniflow_and_mask_matches_flow_wc(const struct miniflow *flow,
                                  const struct minimask *mask,
                                  const struct flow *target,
                                  struct flow_wildcards *wc)
{
    const uint64_t *flowp = miniflow_get_values(flow);
    const uint64_t *maskp = miniflow_get_values(&mask->masks);
    int idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        uint64_t mask = *maskp++;
        uint64_t diff = (*flowp++ ^ flow_u64_value(target, idx)) & mask;

        if (diff) {
            /* Only unwildcard if none of the differing bits is already
             * exact-matched. */
            if (!(flow_u64_value(&wc->masks, idx) & diff)) {
                /* Keep one bit of the difference.  The selected bit may be
                 * different in big-endian v.s. little-endian systems. */
                *flow_u64_lvalue(&wc->masks, idx) |= rightmost_1bit(diff);
            }
            return false;
        }
        /* Fill in the bits that were looked at. */
        *flow_u64_lvalue(&wc->masks, idx) |= mask;
    }

    return true;
}
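
/* A note on the single-bit unwildcarding above: if the first differing word
 * yields diff == 0x28, only rightmost_1bit(diff) == 0x08 is folded into 'wc'.
 * One mismatching bit is already enough to prove the rule cannot match, and
 * unwildcarding less keeps the resulting wildcards as generic as possible. */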

/* Unwildcard the fields looked up so far, if any. */
static void
fill_range_wc(const struct cls_subtable *subtable, struct flow_wildcards *wc,
              uint8_t to)
{
    if (to) {
        flow_wildcards_fold_minimask_range(wc, &subtable->mask, 0, to);
    }
}

static const struct cls_match *
find_match_wc(const struct cls_subtable *subtable, long long version,
              const struct flow *flow, struct trie_ctx trie_ctx[CLS_MAX_TRIES],
              unsigned int n_tries, struct flow_wildcards *wc)
{
    uint32_t basis = 0, hash;
    const struct cls_match *rule = NULL;
    int i;
    struct range ofs;

    if (OVS_UNLIKELY(!wc)) {
        return find_match(subtable, version, flow,
                          flow_hash_in_minimask(flow, &subtable->mask, 0));
    }

    ofs.start = 0;
    /* Try to finish early by checking fields in segments. */
    for (i = 0; i < subtable->n_indices; i++) {
        const struct cmap_node *inode;

        ofs.end = subtable->index_ofs[i];

        if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow,
                        wc)) {
            /* 'wc' bits for the trie field set, now unwildcard the preceding
             * bits used so far. */
            fill_range_wc(subtable, wc, ofs.start);
            return NULL;
        }
        hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
                                           ofs.end, &basis);
        inode = cmap_find(&subtable->indices[i], hash);
        if (!inode) {
            /* No match, can stop immediately, but must fold in the bits
             * used in lookup so far. */
            fill_range_wc(subtable, wc, ofs.end);
            return NULL;
        }

        /* If we have narrowed down to a single rule already, check whether
         * that rule matches.  Either way, we're done.
         *
         * (Rare) hash collisions may cause us to miss the opportunity for this
         * optimization. */
        if (!cmap_node_next(inode)) {
            const struct cls_match *head;

            /* The subtable's i-th index stores the i-th element of each
             * rule's 'index_nodes' array, so stepping back i nodes from
             * 'inode' lands on the rule's index_nodes[0]. */
            ASSIGN_CONTAINER(head, inode - i, index_nodes);
            if (miniflow_and_mask_matches_flow_wc(&head->flow, &subtable->mask,
                                                  flow, wc)) {
                /* Return highest priority rule that is visible. */
                CLS_MATCH_FOR_EACH (rule, head) {
                    if (OVS_LIKELY(cls_match_visible_in_version(rule,
                                                                version))) {
                        return rule;
                    }
                }
            }
            return NULL;
        }
        ofs.start = ofs.end;
    }
    ofs.end = FLOW_U64S;
    /* Trie check for the final range. */
    if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow, wc)) {
        fill_range_wc(subtable, wc, ofs.start);
        return NULL;
    }
    hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
                                       ofs.end, &basis);
    rule = find_match(subtable, version, flow, hash);
    if (!rule && subtable->ports_mask_len) {
        /* Ports are always part of the final range, if any.
         * No match was found for the ports.  Use the ports trie to figure out
         * which ports bits to unwildcard. */
        unsigned int mbits;
        ovs_be32 value, plens, mask;

        mask = MINIFLOW_GET_BE32(&subtable->mask.masks, tp_src);
        value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
        mbits = trie_lookup_value(&subtable->ports_trie, &value, &plens, 32);

        ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
            mask & be32_prefix_mask(mbits);

        /* Unwildcard all bits in the mask up to the ports, as they were used
         * to determine there is no match. */
        fill_range_wc(subtable, wc, TP_PORTS_OFS64);
        return NULL;
    }

    /* Must unwildcard all the fields, as they were looked at. */
    flow_wildcards_fold_minimask(wc, &subtable->mask);
    return rule;
}

static struct cls_match *
find_equal(const struct cls_subtable *subtable, const struct miniflow *flow,
           uint32_t hash)
{
    struct cls_match *head;

    CMAP_FOR_EACH_WITH_HASH (head, cmap_node, hash, &subtable->rules) {
        if (miniflow_equal(&head->flow, flow)) {
            return head;
        }
    }
    return NULL;
}
\f
/* A longest-prefix match tree. */

/* Return at least 'plen' bits of the 'prefix', starting at bit offset 'ofs'.
 * Prefixes are in the network byte order, and the offset 0 corresponds to
 * the most significant bit of the first byte.  The offset can be read as
 * "how many bits to skip from the start of the prefix starting at 'pr'". */
static uint32_t
raw_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    uint32_t prefix;

    pr += ofs / 32; /* Where to start. */
    ofs %= 32;      /* How many bits to skip at 'pr'. */

    prefix = ntohl(*pr) << ofs; /* Get the first 32 - ofs bits. */
    if (plen > 32 - ofs) {      /* Need more than we have already? */
        prefix |= ntohl(*++pr) >> (32 - ofs);
    }
    /* Return with possible unwanted bits at the end. */
    return prefix;
}

/* Return min(TRIE_PREFIX_BITS, plen) bits of the 'prefix', starting at bit
 * offset 'ofs'.  Prefixes are in the network byte order, and the offset 0
 * corresponds to the most significant bit of the first byte.  The offset can
 * be read as "how many bits to skip from the start of the prefix starting at
 * 'pr'". */
static uint32_t
trie_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    if (!plen) {
        return 0;
    }
    if (plen > TRIE_PREFIX_BITS) {
        plen = TRIE_PREFIX_BITS; /* Get at most TRIE_PREFIX_BITS. */
    }
    /* Return with unwanted bits cleared. */
    return raw_get_prefix(pr, ofs, plen) & ~0u << (32 - plen);
}
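
/* Worked example (a sketch, not used elsewhere): for the bytes of
 * 192.168.1.0, skipping the first 8 bits and taking 16 yields 0xa801,
 * left-aligned with the trailing bits cleared. */
static inline void
trie_get_prefix_example(void)
{
    ovs_be32 pr[1] = { htonl(0xc0a80100) }; /* 192.168.1.0. */

    ovs_assert(trie_get_prefix(pr, 8, 16) == 0xa8010000);
}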

/* Return the number of equal bits in 'n_bits' of 'prefix's MSBs and a 'value'
 * starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
prefix_equal_bits(uint32_t prefix, unsigned int n_bits, const ovs_be32 value[],
                  unsigned int ofs)
{
    uint64_t diff = prefix ^ raw_get_prefix(value, ofs, n_bits);
    /* Set the bit after the relevant bits to limit the result. */
    return raw_clz64(diff << 32 | UINT64_C(1) << (63 - n_bits));
}

/* Return the number of equal bits in 'node' prefix and a 'prefix' of length
 * 'plen', starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
trie_prefix_equal_bits(const struct trie_node *node, const ovs_be32 prefix[],
                       unsigned int ofs, unsigned int plen)
{
    return prefix_equal_bits(node->prefix, MIN(node->n_bits, plen - ofs),
                             prefix, ofs);
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' can
 * be greater than 31. */
static unsigned int
be_get_bit_at(const ovs_be32 value[], unsigned int ofs)
{
    return (((const uint8_t *)value)[ofs / 8] >> (7 - ofs % 8)) & 1u;
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' must
 * be between 0 and 31, inclusive. */
static unsigned int
get_bit_at(const uint32_t prefix, unsigned int ofs)
{
    return (prefix >> (31 - ofs)) & 1u;
}

/* Create new branch. */
static struct trie_node *
trie_branch_create(const ovs_be32 *prefix, unsigned int ofs, unsigned int plen,
                   unsigned int n_rules)
{
    struct trie_node *node = xmalloc(sizeof *node);

    node->prefix = trie_get_prefix(prefix, ofs, plen);

    if (plen <= TRIE_PREFIX_BITS) {
        node->n_bits = plen;
        ovsrcu_set_hidden(&node->edges[0], NULL);
        ovsrcu_set_hidden(&node->edges[1], NULL);
        node->n_rules = n_rules;
    } else { /* Need intermediate nodes. */
        struct trie_node *subnode = trie_branch_create(prefix,
                                                       ofs + TRIE_PREFIX_BITS,
                                                       plen - TRIE_PREFIX_BITS,
                                                       n_rules);
        int bit = get_bit_at(subnode->prefix, 0);
        node->n_bits = TRIE_PREFIX_BITS;
        ovsrcu_set_hidden(&node->edges[bit], subnode);
        ovsrcu_set_hidden(&node->edges[!bit], NULL);
        node->n_rules = 0;
    }
    return node;
}

static void
trie_node_destroy(const struct trie_node *node)
{
    ovsrcu_postpone(free, CONST_CAST(struct trie_node *, node));
}

/* Copy a trie node for modification and postpone deletion of the old one. */
static struct trie_node *
trie_node_rcu_realloc(const struct trie_node *node)
{
    struct trie_node *new_node = xmalloc(sizeof *node);

    *new_node = *node;
    trie_node_destroy(node);

    return new_node;
}

static void
trie_destroy(rcu_trie_ptr *trie)
{
    struct trie_node *node = ovsrcu_get_protected(struct trie_node *, trie);

    if (node) {
        ovsrcu_set_hidden(trie, NULL);
        trie_destroy(&node->edges[0]);
        trie_destroy(&node->edges[1]);
        trie_node_destroy(node);
    }
}

static bool
trie_is_leaf(const struct trie_node *trie)
{
    /* No children? */
    return !ovsrcu_get(struct trie_node *, &trie->edges[0])
        && !ovsrcu_get(struct trie_node *, &trie->edges[1]);
}

static void
mask_set_prefix_bits(struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;

    for (i = 0; i < n_bits / 32; i++) {
        mask[i] = OVS_BE32_MAX;
    }
    if (n_bits % 32) {
        mask[i] |= htonl(~0u << (32 - n_bits % 32));
    }
}

static bool
mask_prefix_bits_set(const struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;
    ovs_be32 zeroes = 0;

    for (i = 0; i < n_bits / 32; i++) {
        zeroes |= ~mask[i];
    }
    if (n_bits % 32) {
        zeroes |= ~mask[i] & htonl(~0u << (32 - n_bits % 32));
    }

    return !zeroes; /* All 'n_bits' bits set. */
}
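
/* E.g. (sketch): on an otherwise-zero 'wc', mask_set_prefix_bits(wc, be32ofs,
 * 20) writes no full words and ORs htonl(0xfffff000) into the first word,
 * i.e. a /20 prefix; mask_prefix_bits_set(wc, be32ofs, n_bits) then returns
 * true for any n_bits <= 20. */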

static rcu_trie_ptr *
trie_next_edge(struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return node->edges + be_get_bit_at(value, ofs);
}

static const struct trie_node *
trie_next_node(const struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return ovsrcu_get(struct trie_node *,
                      &node->edges[be_get_bit_at(value, ofs)]);
}

/* Set the bit at ("MSB 0"-based) offset 'ofs'.  'ofs' can be greater than
 * 31. */
static void
be_set_bit_at(ovs_be32 value[], unsigned int ofs)
{
    ((uint8_t *)value)[ofs / 8] |= 1u << (7 - ofs % 8);
}

/* Returns the number of bits in the prefix mask necessary to determine a
 * mismatch, in case there are longer prefixes in the tree below the one that
 * matched.
 * '*plens' will have a bit set for each prefix length that may have matching
 * rules.  The caller is responsible for clearing the '*plens' prior to
 * calling this. */
static unsigned int
trie_lookup_value(const rcu_trie_ptr *trie, const ovs_be32 value[],
                  ovs_be32 plens[], unsigned int n_bits)
{
    const struct trie_node *prev = NULL;
    const struct trie_node *node = ovsrcu_get(struct trie_node *, trie);
    unsigned int match_len = 0; /* Number of matching bits. */

    for (; node; prev = node, node = trie_next_node(node, value, match_len)) {
        unsigned int eqbits;
        /* Check if this edge can be followed. */
        eqbits = prefix_equal_bits(node->prefix, node->n_bits, value,
                                   match_len);
        match_len += eqbits;
        if (eqbits < node->n_bits) { /* Mismatch, nothing more to be found. */
            /* Bit at offset 'match_len' differed. */
            return match_len + 1; /* Includes the first mismatching bit. */
        }
        /* Full match, check if rules exist at this prefix length. */
        if (node->n_rules > 0) {
            be_set_bit_at(plens, match_len - 1);
        }
        if (match_len >= n_bits) {
            return n_bits; /* Full prefix. */
        }
    }
    /* node == NULL.  Full match so far, but we tried to follow a
     * non-existing branch.  Need to exclude the other branch if it exists
     * (it does not if we were called on an empty trie or 'prev' is a leaf
     * node). */
    return !prev || trie_is_leaf(prev) ? match_len : match_len + 1;
}

static unsigned int
trie_lookup(const struct cls_trie *trie, const struct flow *flow,
            union mf_value *plens)
{
    const struct mf_field *mf = trie->field;

    /* Check that current flow matches the prerequisites for the trie
     * field.  Some match fields are used for multiple purposes, so we
     * must check that the trie is relevant for this flow. */
    if (mf_are_prereqs_ok(mf, flow)) {
        return trie_lookup_value(&trie->root,
                                 &((ovs_be32 *)flow)[mf->flow_be32ofs],
                                 &plens->be32, mf->n_bits);
    }
    memset(plens, 0xff, sizeof *plens); /* All prefixes, no skipping. */
    return 0; /* Value not used in this case. */
}

/* Returns the length of a prefix match mask for the field 'mf' in 'minimask',
 * or 0 if the mask is not a valid (contiguous) CIDR prefix. */
static unsigned int
minimask_get_prefix_len(const struct minimask *minimask,
                        const struct mf_field *mf)
{
    unsigned int n_bits = 0, mask_tz = 0; /* Non-zero when end of mask seen. */
    uint8_t be32_ofs = mf->flow_be32ofs;
    uint8_t be32_end = be32_ofs + mf->n_bytes / 4;

    for (; be32_ofs < be32_end; ++be32_ofs) {
        uint32_t mask = ntohl(minimask_get_be32(minimask, be32_ofs));

        /* Validate mask, count the mask length. */
        if (mask_tz) {
            if (mask) {
                return 0; /* No bits allowed after mask ended. */
            }
        } else {
            if (~mask & (~mask + 1)) {
                return 0; /* Mask not contiguous. */
            }
            mask_tz = ctz32(mask);
            n_bits += 32 - mask_tz;
        }
    }

    return n_bits;
}
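
/* The contiguity test above in isolation (a sketch, not used elsewhere):
 * '~mask + 1' is -mask, so for a contiguous mask the carry runs into the
 * mask's lowest 1-bit and shares no bits with '~mask'.  255.255.254.0
 * passes; 255.0.255.0 is rejected. */
static inline bool
mask_is_contiguous_sketch(uint32_t mask)
{
    return !(~mask & (~mask + 1));
}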

/*
 * This is called only when mask prefix is known to be CIDR and non-zero.
 * Relies on the fact that the flow and mask have the same map, and since
 * the mask is CIDR, the storage for the flow field exists even if it
 * happened to be zeros.
 */
static const ovs_be32 *
minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
{
    /* Count the map bits set below the field's u64 word to locate the field
     * in the packed miniflow values; the final term selects the be32 half
     * within that u64. */
    return (OVS_FORCE const ovs_be32 *)
        (miniflow_get_values(&match->flow)
         + count_1bits(match->flow.map &
                       ((UINT64_C(1) << mf->flow_be32ofs / 2) - 1)))
        + (mf->flow_be32ofs & 1);
}

/* Insert rule into the prefix tree.
 * 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_insert_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

static void
trie_insert_prefix(rcu_trie_ptr *edge, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    int ofs = 0;

    /* Walk the tree. */
    for (; (node = ovsrcu_get_protected(struct trie_node *, edge));
         edge = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
        ofs += eqbits;
        if (eqbits < node->n_bits) {
            /* Mismatch, new node needs to be inserted above. */
            int old_branch = get_bit_at(node->prefix, eqbits);
            struct trie_node *new_parent;

            new_parent = trie_branch_create(prefix, ofs - eqbits, eqbits,
                                            ofs == mlen ? 1 : 0);
            /* Copy the node to modify it. */
            node = trie_node_rcu_realloc(node);
            /* Adjust the new node for its new position in the tree. */
            node->prefix <<= eqbits;
            node->n_bits -= eqbits;
            ovsrcu_set_hidden(&new_parent->edges[old_branch], node);

            /* Check if need a new branch for the new rule. */
            if (ofs < mlen) {
                ovsrcu_set_hidden(&new_parent->edges[!old_branch],
                                  trie_branch_create(prefix, ofs, mlen - ofs,
                                                     1));
            }
            ovsrcu_set(edge, new_parent); /* Publish changes. */
            return;
        }
        /* Full match so far. */

        if (ofs == mlen) {
            /* Full match at the current node, rule needs to be added here. */
            node->n_rules++;
            return;
        }
    }
    /* Must insert a new tree branch for the new rule. */
    ovsrcu_set(edge, trie_branch_create(prefix, ofs, mlen - ofs, 1));
}

/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_remove_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove_prefix(rcu_trie_ptr *root, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    rcu_trie_ptr *edges[sizeof(union mf_value) * 8];
    int depth = 0, ofs = 0;

    /* Walk the tree. */
    for (edges[0] = root;
         (node = ovsrcu_get_protected(struct trie_node *, edges[depth]));
         edges[++depth] = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);

        if (eqbits < node->n_bits) {
            /* Mismatch, nothing to be removed.  This should never happen, as
             * only rules in the classifier are ever removed. */
            break; /* Log a warning. */
        }
        /* Full match so far. */
        ofs += eqbits;

        if (ofs == mlen) {
            /* Full prefix match at the current node, remove rule here. */
            if (!node->n_rules) {
                break; /* Log a warning. */
            }
            node->n_rules--;

            /* Check if can prune the tree. */
            while (!node->n_rules) {
                struct trie_node *next,
                    *edge0 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[0]),
                    *edge1 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[1]);

                if (edge0 && edge1) {
                    break; /* A branching point, cannot prune. */
                }

                /* Else have at most one child node, remove this node. */
                next = edge0 ? edge0 : edge1;

                if (next) {
                    if (node->n_bits + next->n_bits > TRIE_PREFIX_BITS) {
                        break; /* Cannot combine. */
                    }
                    next = trie_node_rcu_realloc(next); /* Modify. */

                    /* Combine node with next. */
                    next->prefix = node->prefix | next->prefix >> node->n_bits;
                    next->n_bits += node->n_bits;
                }
                /* Update the parent's edge. */
                ovsrcu_set(edges[depth], next); /* Publish changes. */
                trie_node_destroy(node);

                if (next || !depth) {
                    /* Branch not pruned or at root, nothing more to do. */
                    break;
                }
                node = ovsrcu_get_protected(struct trie_node *,
                                            edges[--depth]);
            }
            return;
        }
    }
    /* Cannot go deeper.  This should never happen, since only rules
     * that actually exist in the classifier are ever removed. */
    VLOG_WARN("Trying to remove non-existing rule from a prefix trie.");
}
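
/* End-to-end sketch (single-threaded, not used elsewhere): insert a /16
 * prefix and look up an address under it.  The lookup reports that 16 bits
 * are decisive and records in 'plens' that rules exist at prefix length
 * 16. */
static inline void
trie_prefix_example(void)
{
    rcu_trie_ptr root;
    ovs_be32 value = htonl(0xc0a80117); /* 192.168.1.23. */
    ovs_be32 plens = 0;
    unsigned int mbits;

    ovsrcu_set_hidden(&root, NULL);
    trie_insert_prefix(&root, &value, 16); /* Insert 192.168/16. */

    mbits = trie_lookup_value(&root, &value, &plens, 32);
    ovs_assert(mbits == 16);
    ovs_assert(be_get_bit_at(&plens, 16 - 1));

    trie_destroy(&root);
}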
\f

/* Poison value: UINTPTR_MAX / 0xf fills every nibble with 0x1, so multiplying
 * by 0xb yields the 0xbbbb... pattern, an address that is easy to recognize
 * in a debugger and very unlikely to be mapped, making a use-after-free of
 * 'next' fail fast. */
#define CLS_MATCH_POISON (struct cls_match *)(UINTPTR_MAX / 0xf * 0xb)

void
cls_match_free_cb(struct cls_match *rule)
{
    ovsrcu_set_hidden(&rule->next, CLS_MATCH_POISON);
    free(rule);
}