/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "classifier.h"
#include "classifier-private.h"
#include <errno.h>
#include <netinet/in.h>
#include "byte-order.h"
#include "dynamic-string.h"
#include "odp-util.h"
#include "ofp-util.h"
#include "packets.h"
#include "util.h"
#include "openvswitch/vlog.h"

VLOG_DEFINE_THIS_MODULE(classifier);

struct trie_ctx;

/* A collection of "struct cls_conjunction"s currently embedded into a
 * cls_match. */
struct cls_conjunction_set {
    /* Link back to the cls_match.
     *
     * cls_conjunction_set is mostly used during classifier lookup, and, in
     * turn, during classifier lookup the most used member of
     * cls_conjunction_set is the rule's priority, so we cache it here for
     * fast access. */
    struct cls_match *match;
    int priority;               /* Cached copy of match->priority. */

    /* Conjunction information.
     *
     * 'min_n_clauses' allows some optimization during classifier lookup. */
    unsigned int n;             /* Number of elements in 'conj'. */
    unsigned int min_n_clauses; /* Smallest 'n_clauses' among elements of
                                 * 'conj'. */
    struct cls_conjunction conj[];
};

/* Ports trie depends on both ports sharing the same ovs_be32. */
#define TP_PORTS_OFS32 (offsetof(struct flow, tp_src) / 4)
BUILD_ASSERT_DECL(TP_PORTS_OFS32 == offsetof(struct flow, tp_dst) / 4);
BUILD_ASSERT_DECL(TP_PORTS_OFS32 % 2 == 0);
#define TP_PORTS_OFS64 (TP_PORTS_OFS32 / 2)
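
/* Illustration (byte offsets hypothetical): tp_src and tp_dst are 16 bits
 * each and laid out back to back in struct flow, so a single aligned
 * ovs_be32 load covers both.  If tp_src were at byte offset 96, then:
 *
 *     TP_PORTS_OFS32 == 96 / 4 == 24    (tp_dst at offset 98 also gives
 *                                        98 / 4 == 24, hence the assert)
 *     TP_PORTS_OFS64 == 24 / 2 == 12    (the same offset in 64-bit units)
 *
 * The BUILD_ASSERT_DECLs above turn any struct flow layout change that
 * breaks these assumptions into a compile-time error. */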

static size_t
cls_conjunction_set_size(size_t n)
{
    return (sizeof(struct cls_conjunction_set)
            + n * sizeof(struct cls_conjunction));
}

static struct cls_conjunction_set *
cls_conjunction_set_alloc(struct cls_match *match,
                          const struct cls_conjunction conj[], size_t n)
{
    if (n) {
        size_t min_n_clauses = conj[0].n_clauses;
        for (size_t i = 1; i < n; i++) {
            min_n_clauses = MIN(min_n_clauses, conj[i].n_clauses);
        }

        struct cls_conjunction_set *set = xmalloc(cls_conjunction_set_size(n));
        set->match = match;
        set->priority = match->priority;
        set->n = n;
        set->min_n_clauses = min_n_clauses;
        memcpy(set->conj, conj, n * sizeof *conj);
        return set;
    } else {
        return NULL;
    }
}

static struct cls_match *
cls_match_alloc(const struct cls_rule *rule, cls_version_t version,
                const struct cls_conjunction conj[], size_t n)
{
    int count = count_1bits(rule->match.flow->map);

    struct cls_match *cls_match
        = xmalloc(sizeof *cls_match + MINIFLOW_VALUES_SIZE(count));

    ovsrcu_init(&cls_match->next, NULL);
    *CONST_CAST(const struct cls_rule **, &cls_match->cls_rule) = rule;
    *CONST_CAST(int *, &cls_match->priority) = rule->priority;
    *CONST_CAST(cls_version_t *, &cls_match->add_version) = version;
    atomic_init(&cls_match->remove_version, version);   /* Initially
                                                         * invisible. */
    miniflow_clone(CONST_CAST(struct miniflow *, &cls_match->flow),
                   rule->match.flow, count);
    ovsrcu_set_hidden(&cls_match->conj_set,
                      cls_conjunction_set_alloc(cls_match, conj, n));

    return cls_match;
}
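
/* A note on the CONST_CAST pattern in cls_match_alloc() above: the fields
 * written through CONST_CAST are declared const in struct cls_match so that
 * they cannot change while concurrent readers traverse the classifier.  The
 * casts are safe here only because the new cls_match is not yet visible to
 * any reader.  A minimal sketch of the idiom (names hypothetical):
 *
 *     struct node { const int key; };
 *     struct node *n = xmalloc(sizeof *n);
 *     *CONST_CAST(int *, &n->key) = 42;    (fine: no reader can see 'n' yet)
 */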

static struct cls_subtable *find_subtable(const struct classifier *cls,
                                          const struct minimask *);
static struct cls_subtable *insert_subtable(struct classifier *cls,
                                            const struct minimask *);
static void destroy_subtable(struct classifier *cls, struct cls_subtable *);

static const struct cls_match *find_match_wc(const struct cls_subtable *,
                                             cls_version_t version,
                                             const struct flow *,
                                             struct trie_ctx *,
                                             unsigned int n_tries,
                                             struct flow_wildcards *);
static struct cls_match *find_equal(const struct cls_subtable *,
                                    const struct miniflow *, uint32_t hash);

/* Return the next visible (lower-priority) rule in the list.  Multiple
 * identical rules with the same priority may exist transitionally, but when
 * versioning is used at most one of them is ever visible for lookups on any
 * given 'version'. */
static inline const struct cls_match *
next_visible_rule_in_list(const struct cls_match *rule, cls_version_t version)
{
    do {
        rule = cls_match_next(rule);
    } while (rule && !cls_match_visible_in_version(rule, version));

    return rule;
}

static unsigned int minimask_get_prefix_len(const struct minimask *,
                                            const struct mf_field *);
static void trie_init(struct classifier *cls, int trie_idx,
                      const struct mf_field *);
static unsigned int trie_lookup(const struct cls_trie *, const struct flow *,
                                union mf_value *plens);
static unsigned int trie_lookup_value(const rcu_trie_ptr *,
                                      const ovs_be32 value[], ovs_be32 plens[],
                                      unsigned int value_bits);
static void trie_destroy(rcu_trie_ptr *);
static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen);
static void trie_insert_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
                               int mlen);
static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen);
static void trie_remove_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
                               int mlen);
static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs,
                                 unsigned int n_bits);
static bool mask_prefix_bits_set(const struct flow_wildcards *,
                                 uint8_t be32ofs, unsigned int n_bits);
\f
/* cls_rule. */

static inline void
cls_rule_init__(struct cls_rule *rule, unsigned int priority)
{
    rculist_init(&rule->node);
    *CONST_CAST(int *, &rule->priority) = priority;
    rule->cls_match = NULL;
}

/* Initializes 'rule' to match packets specified by 'match' at the given
 * 'priority'.  'match' must satisfy the invariant described in the comment at
 * the definition of struct match.
 *
 * The caller must eventually destroy 'rule' with cls_rule_destroy().
 *
 * Clients should not use priority INT_MIN.  (OpenFlow uses priorities between
 * 0 and UINT16_MAX, inclusive.) */
void
cls_rule_init(struct cls_rule *rule, const struct match *match, int priority)
{
    cls_rule_init__(rule, priority);
    minimatch_init(CONST_CAST(struct minimatch *, &rule->match), match);
}
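
/* Example (illustrative sketch): a typical cls_rule lifecycle with a
 * 'match' prepared elsewhere; priority 100 is arbitrary:
 *
 *     struct cls_rule rule;
 *
 *     cls_rule_init(&rule, &match, 100);
 *     ... insert into a classifier, look up, then remove ...
 *     cls_rule_destroy(&rule);    (only after removal from the classifier)
 */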

/* Same as cls_rule_init() for initialization from a "struct minimatch". */
void
cls_rule_init_from_minimatch(struct cls_rule *rule,
                             const struct minimatch *match, int priority)
{
    cls_rule_init__(rule, priority);
    minimatch_clone(CONST_CAST(struct minimatch *, &rule->match), match);
}

/* Initializes 'dst' as a copy of 'src'.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_clone(struct cls_rule *dst, const struct cls_rule *src)
{
    cls_rule_init__(dst, src->priority);
    minimatch_clone(CONST_CAST(struct minimatch *, &dst->match), &src->match);
}

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 *
 * 'src' must be a cls_rule NOT in a classifier.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_move(struct cls_rule *dst, struct cls_rule *src)
{
    cls_rule_init__(dst, src->priority);
    minimatch_move(CONST_CAST(struct minimatch *, &dst->match),
                   CONST_CAST(struct minimatch *, &src->match));
}

/* Frees memory referenced by 'rule'.  Doesn't free 'rule' itself (it's
 * normally embedded into a larger structure).
 *
 * ('rule' must not currently be in a classifier.) */
void
cls_rule_destroy(struct cls_rule *rule)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    ovs_assert(!rule->cls_match);   /* Must not be in a classifier. */

    /* Check that the rule has been properly removed from the classifier. */
    ovs_assert(rule->node.prev == RCULIST_POISON
               || rculist_is_empty(&rule->node));
    rculist_poison__(&rule->node);   /* Poisons also the next pointer. */

    minimatch_destroy(CONST_CAST(struct minimatch *, &rule->match));
}

void
cls_rule_set_conjunctions(struct cls_rule *cr,
                          const struct cls_conjunction *conj, size_t n)
{
    struct cls_match *match = cr->cls_match;
    struct cls_conjunction_set *old
        = ovsrcu_get_protected(struct cls_conjunction_set *, &match->conj_set);
    struct cls_conjunction *old_conj = old ? old->conj : NULL;
    unsigned int old_n = old ? old->n : 0;

    if (old_n != n || (n && memcmp(old_conj, conj, n * sizeof *conj))) {
        if (old) {
            ovsrcu_postpone(free, old);
        }
        ovsrcu_set(&match->conj_set,
                   cls_conjunction_set_alloc(match, conj, n));
    }
}

/* Returns true if 'a' and 'b' match the same packets at the same priority,
 * false if they differ in some way. */
bool
cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b)
{
    return a->priority == b->priority && minimatch_equal(&a->match, &b->match);
}

/* Returns a hash value for 'rule', folding in 'basis'. */
uint32_t
cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
{
    return minimatch_hash(&rule->match, hash_int(rule->priority, basis));
}

/* Appends a string describing 'rule' to 's'. */
void
cls_rule_format(const struct cls_rule *rule, struct ds *s)
{
    minimatch_format(&rule->match, s, rule->priority);
}

/* Returns true if 'rule' matches every packet, false otherwise. */
bool
cls_rule_is_catchall(const struct cls_rule *rule)
{
    return minimask_is_catchall(rule->match.mask);
}

/* Makes 'rule' invisible after 'version'.  Once that version is made
 * invisible (by changing the version parameter used in lookups), the rule
 * should be actually removed via ovsrcu_postpone().
 *
 * 'rule' must be in a classifier. */
void
cls_rule_make_invisible_in_version(const struct cls_rule *rule,
                                   cls_version_t remove_version)
{
    ovs_assert(remove_version >= rule->cls_match->add_version);

    cls_match_set_remove_version(rule->cls_match, remove_version);
}

/* This undoes the change made by cls_rule_make_invisible_in_version().
 *
 * 'rule' must be in a classifier. */
void
cls_rule_restore_visibility(const struct cls_rule *rule)
{
    cls_match_set_remove_version(rule->cls_match, CLS_NOT_REMOVED_VERSION);
}

/* Return true if 'rule' is visible in 'version'.
 *
 * 'rule' must be in a classifier. */
bool
cls_rule_visible_in_version(const struct cls_rule *rule, cls_version_t version)
{
    return cls_match_visible_in_version(rule->cls_match, version);
}
\f
/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
void
classifier_init(struct classifier *cls, const uint8_t *flow_segments)
{
    cls->n_rules = 0;
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
    cmap_init(&cls->partitions);
    cls->n_flow_segments = 0;
    if (flow_segments) {
        while (cls->n_flow_segments < CLS_MAX_INDICES
               && *flow_segments < FLOW_U64S) {
            cls->flow_segments[cls->n_flow_segments++] = *flow_segments++;
        }
    }
    cls->n_tries = 0;
    for (int i = 0; i < CLS_MAX_TRIES; i++) {
        trie_init(cls, i, NULL);
    }
    cls->publish = true;
}

/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
void
classifier_destroy(struct classifier *cls)
{
    if (cls) {
        struct cls_partition *partition;
        struct cls_subtable *subtable;
        int i;

        for (i = 0; i < cls->n_tries; i++) {
            trie_destroy(&cls->tries[i].root);
        }

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);

        CMAP_FOR_EACH (partition, cmap_node, &cls->partitions) {
            ovsrcu_postpone(free, partition);
        }
        cmap_destroy(&cls->partitions);

        pvector_destroy(&cls->subtables);
    }
}

/* Set the fields for which prefix lookup should be performed. */
bool
classifier_set_prefix_fields(struct classifier *cls,
                             const enum mf_field_id *trie_fields,
                             unsigned int n_fields)
{
    const struct mf_field *new_fields[CLS_MAX_TRIES];
    struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
    int i, n_tries = 0;
    bool changed = false;

    for (i = 0; i < n_fields && n_tries < CLS_MAX_TRIES; i++) {
        const struct mf_field *field = mf_from_id(trie_fields[i]);
        if (field->flow_be32ofs < 0 || field->n_bits % 32) {
            /* Incompatible field.  This is the only place where we
             * enforce these requirements, but the rest of the trie code
             * depends on 'flow_be32ofs' being non-negative and the field
             * length being a multiple of 32 bits. */
            continue;
        }

        if (bitmap_is_set(fields.bm, trie_fields[i])) {
            /* Duplicate field, there is no need to build more than
             * one index for any one field. */
            continue;
        }
        bitmap_set1(fields.bm, trie_fields[i]);

        new_fields[n_tries] = NULL;
        if (n_tries >= cls->n_tries || field != cls->tries[n_tries].field) {
            new_fields[n_tries] = field;
            changed = true;
        }
        n_tries++;
    }

    if (changed || n_tries < cls->n_tries) {
        struct cls_subtable *subtable;

        /* Trie configuration needs to change.  Disable trie lookups
         * for the tries that are changing and wait for all the current
         * readers with the old configuration to be done. */
        changed = false;
        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            for (i = 0; i < cls->n_tries; i++) {
                if ((i < n_tries && new_fields[i]) || i >= n_tries) {
                    if (subtable->trie_plen[i]) {
                        subtable->trie_plen[i] = 0;
                        changed = true;
                    }
                }
            }
        }
        /* Synchronize if any readers were using tries.  The readers may
         * temporarily function without the trie lookup based optimizations. */
        if (changed) {
            /* ovsrcu_synchronize() functions as a memory barrier, so it does
             * not matter that subtable->trie_plen is not atomic. */
            ovsrcu_synchronize();
        }

        /* Now set up the tries. */
        for (i = 0; i < n_tries; i++) {
            if (new_fields[i]) {
                trie_init(cls, i, new_fields[i]);
            }
        }
        /* Destroy the rest, if any. */
        for (; i < cls->n_tries; i++) {
            trie_init(cls, i, NULL);
        }

        cls->n_tries = n_tries;
        return true;
    }

    return false; /* No change. */
}
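
/* Example (illustrative sketch): enabling prefix tries for the IPv4 source
 * and destination address fields:
 *
 *     enum mf_field_id trie_fields[] = { MFF_IPV4_DST, MFF_IPV4_SRC };
 *
 *     classifier_set_prefix_fields(&cls, trie_fields,
 *                                  ARRAY_SIZE(trie_fields));
 *
 * Fields that are not 32-bit aligned or whose width is not a multiple of
 * 32 bits are silently skipped, per the checks above. */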

static void
trie_init(struct classifier *cls, int trie_idx, const struct mf_field *field)
{
    struct cls_trie *trie = &cls->tries[trie_idx];
    struct cls_subtable *subtable;

    if (trie_idx < cls->n_tries) {
        trie_destroy(&trie->root);
    } else {
        ovsrcu_set_hidden(&trie->root, NULL);
    }
    trie->field = field;

    /* Add existing rules to the new trie. */
    CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
        unsigned int plen;

        plen = field ? minimask_get_prefix_len(&subtable->mask, field) : 0;
        if (plen) {
            struct cls_match *head;

            CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
                trie_insert(trie, head->cls_rule, plen);
            }
        }
        /* Initialize subtable's prefix length on this field.  This will
         * allow readers to use the trie. */
        atomic_thread_fence(memory_order_release);
        subtable->trie_plen[trie_idx] = plen;
    }
}

/* Returns true if 'cls' contains no classification rules, false otherwise.
 * Checking the cmap requires no locking. */
bool
classifier_is_empty(const struct classifier *cls)
{
    return cmap_is_empty(&cls->subtables_map);
}

/* Returns the number of rules in 'cls'. */
int
classifier_count(const struct classifier *cls)
{
    /* n_rules is an int, so in the presence of concurrent writers this will
     * return either the old or a new value. */
    return cls->n_rules;
}

static uint32_t
hash_metadata(ovs_be64 metadata)
{
    return hash_uint64((OVS_FORCE uint64_t) metadata);
}

static struct cls_partition *
find_partition(const struct classifier *cls, ovs_be64 metadata, uint32_t hash)
{
    struct cls_partition *partition;

    CMAP_FOR_EACH_WITH_HASH (partition, cmap_node, hash, &cls->partitions) {
        if (partition->metadata == metadata) {
            return partition;
        }
    }

    return NULL;
}

static struct cls_partition *
create_partition(struct classifier *cls, struct cls_subtable *subtable,
                 ovs_be64 metadata)
{
    uint32_t hash = hash_metadata(metadata);
    struct cls_partition *partition = find_partition(cls, metadata, hash);
    if (!partition) {
        partition = xmalloc(sizeof *partition);
        partition->metadata = metadata;
        partition->tags = 0;
        tag_tracker_init(&partition->tracker);
        cmap_insert(&cls->partitions, &partition->cmap_node, hash);
    }
    tag_tracker_add(&partition->tracker, &partition->tags, subtable->tag);
    return partition;
}

static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
{
    /* Could optimize to use the same map if needed for fast path. */
    return MINIFLOW_GET_BE32(match->flow, tp_src)
        & MINIFLOW_GET_BE32(&match->mask->masks, tp_src);
}
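
/* Illustration (values hypothetical): since tp_src and tp_dst share one
 * ovs_be32, ANDing the value with the mask leaves the wildcarded bits in a
 * known zero state.  For tp_src 80 (0x0050), tp_dst 443 (0x01bb), and a
 * mask matching tp_src only:
 *
 *     value 0x005001bb & mask 0xffff0000 == 0x00500000
 *
 * so the wildcarded tp_dst bits can be included in trie comparisons and
 * will always match. */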

static void
subtable_replace_head_rule(struct classifier *cls OVS_UNUSED,
                           struct cls_subtable *subtable,
                           struct cls_match *head, struct cls_match *new,
                           uint32_t hash, uint32_t ihash[CLS_MAX_INDICES])
{
    /* Rule's data is already in the tries. */

    new->partition = head->partition; /* Steal partition, if any. */
    head->partition = NULL;

    for (int i = 0; i < subtable->n_indices; i++) {
        cmap_replace(&subtable->indices[i], &head->index_nodes[i],
                     &new->index_nodes[i], ihash[i]);
    }
    cmap_replace(&subtable->rules, &head->cmap_node, &new->cmap_node, hash);
}

/* Inserts 'rule' into 'cls' in 'version'.  Until 'rule' is removed from
 * 'cls', the caller must not modify or free it.
 *
 * If 'cls' already contains an identical rule (including wildcards, values of
 * fixed fields, and priority) that is visible in 'version', replaces the old
 * rule by 'rule' and returns the rule that was replaced.  The caller takes
 * ownership of the returned rule and is thus responsible for destroying it
 * with cls_rule_destroy(), after the RCU grace period has passed (see
 * ovsrcu_postpone()).
 *
 * Returns NULL if 'cls' does not contain a rule with an identical key, after
 * inserting the new rule.  In this case, no rules are displaced by the new
 * rule, even rules that cannot have any effect because the new rule matches a
 * superset of their flows and has higher priority.
 */
const struct cls_rule *
classifier_replace(struct classifier *cls, const struct cls_rule *rule,
                   cls_version_t version,
                   const struct cls_conjunction *conjs, size_t n_conjs)
{
    struct cls_match *new;
    struct cls_subtable *subtable;
    uint32_t ihash[CLS_MAX_INDICES];
    uint8_t prev_be64ofs = 0;
    struct cls_match *head;
    size_t n_rules = 0;
    uint32_t basis;
    uint32_t hash;
    int i;

    /* 'new' is initially invisible to lookups. */
    new = cls_match_alloc(rule, version, conjs, n_conjs);

    CONST_CAST(struct cls_rule *, rule)->cls_match = new;

    subtable = find_subtable(cls, rule->match.mask);
    if (!subtable) {
        subtable = insert_subtable(cls, rule->match.mask);
    }

    /* Compute hashes in segments. */
    basis = 0;
    for (i = 0; i < subtable->n_indices; i++) {
        ihash[i] = minimatch_hash_range(&rule->match, prev_be64ofs,
                                        subtable->index_ofs[i], &basis);
        prev_be64ofs = subtable->index_ofs[i];
    }
    hash = minimatch_hash_range(&rule->match, prev_be64ofs, FLOW_U64S, &basis);

    head = find_equal(subtable, rule->match.flow, hash);
    if (!head) {
        /* Add rule to tries.
         *
         * Concurrent readers might miss seeing the rule until this update,
         * which might require being fixed up by revalidation later. */
        for (i = 0; i < cls->n_tries; i++) {
            if (subtable->trie_plen[i]) {
                trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]);
            }
        }

        /* Add rule to ports trie. */
        if (subtable->ports_mask_len) {
            /* We mask the value to be inserted to always have the wildcarded
             * bits in a known (zero) state, so we can include them in the
             * comparison and they will always match (their original value
             * does not matter). */
            ovs_be32 masked_ports = minimatch_get_ports(&rule->match);

            trie_insert_prefix(&subtable->ports_trie, &masked_ports,
                               subtable->ports_mask_len);
        }

        /* Add rule to partitions.
         *
         * Concurrent readers might miss seeing the rule until this update,
         * which might require being fixed up by revalidation later. */
        new->partition = NULL;
        if (minimask_get_metadata_mask(rule->match.mask) == OVS_BE64_MAX) {
            ovs_be64 metadata = miniflow_get_metadata(rule->match.flow);

            new->partition = create_partition(cls, subtable, metadata);
        }

        /* Add new node to segment indices.
         *
         * Readers may find the rule in the indices before the rule is visible
         * in the subtable's 'rules' map.  This may result in us losing the
         * opportunity to quit lookups earlier, resulting in sub-optimal
         * wildcarding.  This will be fixed later by revalidation (always
         * scheduled after flow table changes). */
        for (i = 0; i < subtable->n_indices; i++) {
            cmap_insert(&subtable->indices[i], &new->index_nodes[i], ihash[i]);
        }
        n_rules = cmap_insert(&subtable->rules, &new->cmap_node, hash);
    } else {   /* Equal rules exist in the classifier already. */
        struct cls_match *prev, *iter;

        /* Scan the list for the insertion point that will keep the list in
         * order of decreasing priority.  Insert after rules marked invisible
         * in any version of the same priority. */
        FOR_EACH_RULE_IN_LIST_PROTECTED (iter, prev, head) {
            if (rule->priority > iter->priority
                || (rule->priority == iter->priority
                    && !cls_match_is_eventually_invisible(iter))) {
                break;
            }
        }

        /* Replace 'iter' with 'new' or insert 'new' between 'prev' and
         * 'iter'. */
        if (iter) {
            struct cls_rule *old;

            if (rule->priority == iter->priority) {
                cls_match_replace(prev, iter, new);
                old = CONST_CAST(struct cls_rule *, iter->cls_rule);
            } else {
                cls_match_insert(prev, iter, new);
                old = NULL;
            }

            /* Replace the existing head in data structures, if rule is the
             * new head. */
            if (iter == head) {
                subtable_replace_head_rule(cls, subtable, head, new, hash,
                                           ihash);
            }

            if (old) {
                struct cls_conjunction_set *conj_set;

                conj_set = ovsrcu_get_protected(struct cls_conjunction_set *,
                                                &iter->conj_set);
                if (conj_set) {
                    ovsrcu_postpone(free, conj_set);
                }

                ovsrcu_postpone(cls_match_free_cb, iter);
                old->cls_match = NULL;

                /* No change in subtable's max priority or max count. */

                /* Make 'new' visible to lookups in the appropriate
                 * version. */
                cls_match_set_remove_version(new, CLS_NOT_REMOVED_VERSION);

                /* Make rule visible to iterators (immediately). */
                rculist_replace(CONST_CAST(struct rculist *, &rule->node),
                                &old->node);

                /* Return displaced rule.  Caller is responsible for keeping
                 * it around until all threads quiesce. */
                return old;
            }
        } else {
            /* 'new' is the new node after 'prev'. */
            cls_match_insert(prev, iter, new);
        }
    }

    /* Make 'new' visible to lookups in the appropriate version. */
    cls_match_set_remove_version(new, CLS_NOT_REMOVED_VERSION);

    /* Make rule visible to iterators (immediately). */
    rculist_push_back(&subtable->rules_list,
                      CONST_CAST(struct rculist *, &rule->node));

    /* Rule was added, not replaced.  Update 'subtable's 'max_priority' and
     * 'max_count', if necessary.
     *
     * The rule was already inserted, but concurrent readers may not see the
     * rule yet as the subtables vector is not updated yet.  This will have to
     * be fixed by revalidation later. */
    if (n_rules == 1) {
        subtable->max_priority = rule->priority;
        subtable->max_count = 1;
        pvector_insert(&cls->subtables, subtable, rule->priority);
    } else if (rule->priority == subtable->max_priority) {
        ++subtable->max_count;
    } else if (rule->priority > subtable->max_priority) {
        subtable->max_priority = rule->priority;
        subtable->max_count = 1;
        pvector_change_priority(&cls->subtables, subtable, rule->priority);
    }

    /* Nothing was replaced. */
    cls->n_rules++;

    if (cls->publish) {
        pvector_publish(&cls->subtables);
    }

    return NULL;
}
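
/* Example (illustrative sketch; 'my_rule->cr' and free_my_rule_cb() are
 * hypothetical): the displaced rule, if any, may still be referenced by RCU
 * readers, so it must be destroyed only after a grace period:
 *
 *     const struct cls_rule *displaced
 *         = classifier_replace(&cls, &my_rule->cr, version, NULL, 0);
 *     if (displaced) {
 *         ovsrcu_postpone(free_my_rule_cb,
 *                         CONST_CAST(struct cls_rule *, displaced));
 *     }
 */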

/* Inserts 'rule' into 'cls'.  Until 'rule' is removed from 'cls', the caller
 * must not modify or free it.
 *
 * 'cls' must not contain an identical rule (including wildcards, values of
 * fixed fields, and priority).  Use classifier_find_rule_exactly() to find
 * such a rule. */
void
classifier_insert(struct classifier *cls, const struct cls_rule *rule,
                  cls_version_t version, const struct cls_conjunction conj[],
                  size_t n_conj)
{
    const struct cls_rule *displaced_rule
        = classifier_replace(cls, rule, version, conj, n_conj);
    ovs_assert(!displaced_rule);
}

/* Removes 'rule' from 'cls'.  It is the caller's responsibility to destroy
 * 'rule' with cls_rule_destroy(), freeing the memory block in which 'rule'
 * resides, etc., as necessary.
 *
 * Does nothing if 'rule' has been already removed, or was never inserted.
 *
 * Returns the removed rule, or NULL, if it was already removed.
 */
const struct cls_rule *
classifier_remove(struct classifier *cls, const struct cls_rule *cls_rule)
{
    struct cls_match *rule, *prev, *next, *head;
    struct cls_partition *partition;
    struct cls_conjunction_set *conj_set;
    struct cls_subtable *subtable;
    int i;
    uint32_t basis = 0, hash, ihash[CLS_MAX_INDICES];
    uint8_t prev_be64ofs = 0;
    size_t n_rules;

    rule = cls_rule->cls_match;
    if (!rule) {
        return NULL;
    }
    /* Mark as removed. */
    CONST_CAST(struct cls_rule *, cls_rule)->cls_match = NULL;

    /* Remove 'cls_rule' from the subtable's rules list. */
    rculist_remove(CONST_CAST(struct rculist *, &cls_rule->node));

    subtable = find_subtable(cls, cls_rule->match.mask);
    ovs_assert(subtable);

    for (i = 0; i < subtable->n_indices; i++) {
        ihash[i] = minimatch_hash_range(&cls_rule->match, prev_be64ofs,
                                        subtable->index_ofs[i], &basis);
        prev_be64ofs = subtable->index_ofs[i];
    }
    hash = minimatch_hash_range(&cls_rule->match, prev_be64ofs, FLOW_U64S,
                                &basis);

    head = find_equal(subtable, cls_rule->match.flow, hash);

    /* Check if the rule is not the head rule. */
    if (rule != head) {
        struct cls_match *iter;

        /* Not the head rule, but potentially one with the same priority. */
        /* Remove from the list of equal rules. */
        FOR_EACH_RULE_IN_LIST_PROTECTED (iter, prev, head) {
            if (rule == iter) {
                break;
            }
        }
        ovs_assert(iter == rule);

        cls_match_remove(prev, rule);

        goto check_priority;
    }

    /* 'rule' is the head rule.  Check if there is another rule to
     * replace 'rule' in the data structures. */
    next = cls_match_next_protected(rule);
    if (next) {
        subtable_replace_head_rule(cls, subtable, rule, next, hash, ihash);
        goto check_priority;
    }

    /* 'rule' is last of the kind in the classifier, must remove from all the
     * data structures. */

    if (subtable->ports_mask_len) {
        ovs_be32 masked_ports = minimatch_get_ports(&cls_rule->match);

        trie_remove_prefix(&subtable->ports_trie,
                           &masked_ports, subtable->ports_mask_len);
    }
    for (i = 0; i < cls->n_tries; i++) {
        if (subtable->trie_plen[i]) {
            trie_remove(&cls->tries[i], cls_rule, subtable->trie_plen[i]);
        }
    }

    /* Remove rule node from indices. */
    for (i = 0; i < subtable->n_indices; i++) {
        cmap_remove(&subtable->indices[i], &rule->index_nodes[i], ihash[i]);
    }
    n_rules = cmap_remove(&subtable->rules, &rule->cmap_node, hash);

    partition = rule->partition;
    if (partition) {
        tag_tracker_subtract(&partition->tracker, &partition->tags,
                             subtable->tag);
        if (!partition->tags) {
            cmap_remove(&cls->partitions, &partition->cmap_node,
                        hash_metadata(partition->metadata));
            ovsrcu_postpone(free, partition);
        }
    }

    if (n_rules == 0) {
        destroy_subtable(cls, subtable);
    } else {
check_priority:
        if (subtable->max_priority == rule->priority
            && --subtable->max_count == 0) {
            /* Find the new 'max_priority' and 'max_count'. */
            int max_priority = INT_MIN;
            struct cls_match *head;

            CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
                if (head->priority > max_priority) {
                    max_priority = head->priority;
                    subtable->max_count = 1;
                } else if (head->priority == max_priority) {
                    ++subtable->max_count;
                }
            }
            subtable->max_priority = max_priority;
            pvector_change_priority(&cls->subtables, subtable, max_priority);
        }
    }

    if (cls->publish) {
        pvector_publish(&cls->subtables);
    }

    /* Free the rule. */
    conj_set = ovsrcu_get_protected(struct cls_conjunction_set *,
                                    &rule->conj_set);
    if (conj_set) {
        ovsrcu_postpone(free, conj_set);
    }
    ovsrcu_postpone(cls_match_free_cb, rule);
    cls->n_rules--;

    return cls_rule;
}
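
/* Example (illustrative sketch; 'my_rule' and free_my_rule_cb() are
 * hypothetical): removal follows the same RCU discipline as replacement,
 * deferring destruction until concurrent readers have quiesced:
 *
 *     if (classifier_remove(&cls, &my_rule->cr)) {
 *         ovsrcu_postpone(free_my_rule_cb, my_rule);
 *     }
 */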

/* Prefix tree context.  Valid when 'lookup_done' is true.  Can skip all
 * subtables which have a prefix match on the trie field, but whose prefix
 * length is not indicated in 'match_plens'.  For example, a subtable that
 * has an 8-bit trie field prefix match can be skipped if
 * !be_get_bit_at(&match_plens, 8 - 1).  If skipped, 'maskbits' prefix bits
 * must be unwildcarded to make the datapath flow only match packets it
 * should. */
struct trie_ctx {
    const struct cls_trie *trie;
    bool lookup_done;        /* Status of the lookup. */
    uint8_t be32ofs;         /* U32 offset of the field in question. */
    unsigned int maskbits;   /* Prefix length needed to avoid false matches. */
    union mf_value match_plens; /* Bitmask of prefix lengths with possible
                                 * matches. */
};

static void
trie_ctx_init(struct trie_ctx *ctx, const struct cls_trie *trie)
{
    ctx->trie = trie;
    ctx->be32ofs = trie->field->flow_be32ofs;
    ctx->lookup_done = false;
}

struct conjunctive_match {
    struct hmap_node hmap_node;
    uint32_t id;
    uint64_t clauses;
};

static struct conjunctive_match *
find_conjunctive_match__(struct hmap *matches, uint64_t id, uint32_t hash)
{
    struct conjunctive_match *m;

    HMAP_FOR_EACH_IN_BUCKET (m, hmap_node, hash, matches) {
        if (m->id == id) {
            return m;
        }
    }
    return NULL;
}

static bool
find_conjunctive_match(const struct cls_conjunction_set *set,
                       unsigned int max_n_clauses, struct hmap *matches,
                       struct conjunctive_match *cm_stubs, size_t n_cm_stubs,
                       uint32_t *idp)
{
    const struct cls_conjunction *c;

    if (max_n_clauses < set->min_n_clauses) {
        return false;
    }

    for (c = set->conj; c < &set->conj[set->n]; c++) {
        struct conjunctive_match *cm;
        uint32_t hash;

        if (c->n_clauses > max_n_clauses) {
            continue;
        }

        hash = hash_int(c->id, 0);
        cm = find_conjunctive_match__(matches, c->id, hash);
        if (!cm) {
            size_t n = hmap_count(matches);

            cm = n < n_cm_stubs ? &cm_stubs[n] : xmalloc(sizeof *cm);
            hmap_insert(matches, &cm->hmap_node, hash);
            cm->id = c->id;
            cm->clauses = UINT64_MAX << (c->n_clauses & 63);
        }
        cm->clauses |= UINT64_C(1) << c->clause;
        if (cm->clauses == UINT64_MAX) {
            *idp = cm->id;
            return true;
        }
    }
    return false;
}
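
/* Worked example for the 'clauses' bitmap above (values hypothetical): for
 * a conjunction with n_clauses == 2, the bitmap starts as
 *
 *     UINT64_MAX << 2 == 0xffff...fffc    (the low 2 bits track clauses)
 *
 * Matching clause 0 ORs in bit 0, matching clause 1 ORs in bit 1, at which
 * point the bitmap equals UINT64_MAX and conjunction 'id' is complete. */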

static void
free_conjunctive_matches(struct hmap *matches,
                         struct conjunctive_match *cm_stubs, size_t n_cm_stubs)
{
    if (hmap_count(matches) > n_cm_stubs) {
        struct conjunctive_match *cm, *next;

        HMAP_FOR_EACH_SAFE (cm, next, hmap_node, matches) {
            if (!(cm >= cm_stubs && cm < &cm_stubs[n_cm_stubs])) {
                free(cm);
            }
        }
    }
    hmap_destroy(matches);
}

/* Like classifier_lookup(), except that support for conjunctive matches can
 * be configured with 'allow_conjunctive_matches'.  That feature is not
 * exposed externally because turning off conjunctive matches is only useful
 * to avoid recursion within this function itself.
 *
 * 'flow' is non-const to allow for temporary modifications during the lookup.
 * Any changes are restored before returning. */
static const struct cls_rule *
classifier_lookup__(const struct classifier *cls, cls_version_t version,
                    struct flow *flow, struct flow_wildcards *wc,
                    bool allow_conjunctive_matches)
{
    const struct cls_partition *partition;
    struct trie_ctx trie_ctx[CLS_MAX_TRIES];
    const struct cls_match *match;
    tag_type tags;

    /* Highest-priority flow in 'cls' that certainly matches 'flow'. */
    const struct cls_match *hard = NULL;
    int hard_pri = INT_MIN;     /* hard ? hard->priority : INT_MIN. */

    /* Highest-priority conjunctive flows in 'cls' matching 'flow'.  Since
     * these are (components of) conjunctive flows, we can only know whether
     * the full conjunctive flow matches after seeing multiple of them.  Thus,
     * we refer to these as "soft matches". */
    struct cls_conjunction_set *soft_stub[64];
    struct cls_conjunction_set **soft = soft_stub;
    size_t n_soft = 0, allocated_soft = ARRAY_SIZE(soft_stub);
    int soft_pri = INT_MIN;    /* n_soft ? MAX(soft[*]->priority) : INT_MIN. */

    /* Synchronize for cls->n_tries and subtable->trie_plen.  They can change
     * when the trie configuration changes, which happens typically only on
     * startup. */
    atomic_thread_fence(memory_order_acquire);

    /* Determine 'tags' such that, if 'subtable->tag' doesn't intersect them,
     * then 'flow' cannot possibly match in 'subtable':
     *
     *     - If flow->metadata maps to a given 'partition', then we can use
     *       'tags' for 'partition->tags'.
     *
     *     - If flow->metadata has no partition, then no rule in 'cls' has an
     *       exact-match for flow->metadata.  That means that we don't need to
     *       search any subtable that includes flow->metadata in its mask.
     *
     * In either case, we always need to search any cls_subtables that do not
     * include flow->metadata in their mask.  One way to do that would be to
     * check the "cls_subtable"s explicitly for that, but that would require
     * an extra branch per subtable.  Instead, we mark such a cls_subtable's
     * 'tags' as TAG_ALL and make sure that 'tags' is never empty.  This means
     * that 'tags' always intersects such a cls_subtable's 'tags', so we don't
     * need a special case.
     */
    partition = (cmap_is_empty(&cls->partitions)
                 ? NULL
                 : find_partition(cls, flow->metadata,
                                  hash_metadata(flow->metadata)));
    tags = partition ? partition->tags : TAG_ARBITRARY;

    /* Initialize trie contexts for find_match_wc(). */
    for (int i = 0; i < cls->n_tries; i++) {
        trie_ctx_init(&trie_ctx[i], &cls->tries[i]);
    }

    /* Main loop. */
    struct cls_subtable *subtable;
    PVECTOR_FOR_EACH_PRIORITY (subtable, hard_pri, 2, sizeof *subtable,
                               &cls->subtables) {
        struct cls_conjunction_set *conj_set;

        /* Skip subtables not in our partition. */
        if (!tag_intersects(tags, subtable->tag)) {
            continue;
        }

        /* Skip subtables with no match, or where the match is lower-priority
         * than some certain match we've already found. */
        match = find_match_wc(subtable, version, flow, trie_ctx, cls->n_tries,
                              wc);
        if (!match || match->priority <= hard_pri) {
            continue;
        }

        conj_set = ovsrcu_get(struct cls_conjunction_set *, &match->conj_set);
        if (!conj_set) {
            /* 'match' isn't part of a conjunctive match.  It's the best
             * certain match we've got so far, since we know that it's
             * higher-priority than hard_pri.
             *
             * (There might be a higher-priority conjunctive match.  We can't
             * tell yet.) */
            hard = match;
            hard_pri = hard->priority;
        } else if (allow_conjunctive_matches) {
            /* 'match' is part of a conjunctive match.  Add it to the list. */
            if (OVS_UNLIKELY(n_soft >= allocated_soft)) {
                struct cls_conjunction_set **old_soft = soft;

                allocated_soft *= 2;
                soft = xmalloc(allocated_soft * sizeof *soft);
                memcpy(soft, old_soft, n_soft * sizeof *soft);
                if (old_soft != soft_stub) {
                    free(old_soft);
                }
            }
            soft[n_soft++] = conj_set;

            /* Keep track of the highest-priority soft match. */
            if (soft_pri < match->priority) {
                soft_pri = match->priority;
            }
        }
    }

    /* In the common case, at this point we have no soft matches and we can
     * return immediately.  (We do the same thing if we have potential soft
     * matches but none of them are higher-priority than our hard match.) */
    if (hard_pri >= soft_pri) {
        if (soft != soft_stub) {
            free(soft);
        }
        return hard ? hard->cls_rule : NULL;
    }

    /* At this point, we have some soft matches.  We might also have a hard
     * match; if so, its priority is lower than the highest-priority soft
     * match. */

    /* Soft match loop.
     *
     * Check whether soft matches are real matches. */
    for (;;) {
        /* Delete soft matches that are null.  This only happens in second and
         * subsequent iterations of the soft match loop, when we drop back
         * from a high-priority soft match to a lower-priority one.
         *
         * Also, delete soft matches whose priority is less than or equal to
         * the hard match's priority.  In the first iteration of the soft
         * match loop, these can be in 'soft' because the earlier main loop
         * found the soft match before the hard match.  In second and later
         * iterations of the soft match loop, these can be in 'soft' because
         * we dropped back from a high-priority soft match to a lower-priority
         * soft match.
         *
         * It is tempting to delete soft matches that cannot be satisfied
         * because there are fewer soft matches than required to satisfy any
         * of their conjunctions, but we cannot do that because there might be
         * lower priority soft or hard matches with otherwise identical
         * matches.  (We could special case those here, but there's no
         * need--we'll do so at the bottom of the soft match loop anyway and
         * this duplicates less code.)
         *
         * It's also tempting to break out of the soft match loop if 'n_soft
         * == 1' but that would also miss lower-priority hard matches.  We
         * could special case that also but again there's no need. */
        for (int i = 0; i < n_soft; ) {
            if (!soft[i] || soft[i]->priority <= hard_pri) {
                soft[i] = soft[--n_soft];
            } else {
                i++;
            }
        }
        if (!n_soft) {
            break;
        }

        /* Find the highest priority among the soft matches.  (We know this
         * must be higher than the hard match's priority; otherwise we would
         * have deleted all of the soft matches in the previous loop.)  Count
         * the number of soft matches that have that priority. */
        soft_pri = INT_MIN;
        int n_soft_pri = 0;
        for (int i = 0; i < n_soft; i++) {
            if (soft[i]->priority > soft_pri) {
                soft_pri = soft[i]->priority;
                n_soft_pri = 1;
            } else if (soft[i]->priority == soft_pri) {
                n_soft_pri++;
            }
        }
        ovs_assert(soft_pri > hard_pri);

        /* Look for a real match among the highest-priority soft matches.
         *
         * It's unusual to have many conjunctive matches, so we use stubs to
         * avoid calling malloc() in the common case.  An hmap has a built-in
         * stub for up to 2 hmap_nodes; possibly, we would benefit from a
         * variant with a bigger stub. */
        struct conjunctive_match cm_stubs[16];
        struct hmap matches;

        hmap_init(&matches);
        for (int i = 0; i < n_soft; i++) {
            uint32_t id;

            if (soft[i]->priority == soft_pri
                && find_conjunctive_match(soft[i], n_soft_pri, &matches,
                                          cm_stubs, ARRAY_SIZE(cm_stubs),
                                          &id)) {
                uint32_t saved_conj_id = flow->conj_id;
                const struct cls_rule *rule;

                flow->conj_id = id;
                rule = classifier_lookup__(cls, version, flow, wc, false);
                flow->conj_id = saved_conj_id;

                if (rule) {
                    free_conjunctive_matches(&matches,
                                             cm_stubs, ARRAY_SIZE(cm_stubs));
                    if (soft != soft_stub) {
                        free(soft);
                    }
                    return rule;
                }
            }
        }
        free_conjunctive_matches(&matches, cm_stubs, ARRAY_SIZE(cm_stubs));

        /* There's no real match among the highest-priority soft matches.
         * However, if any of those soft matches has a lower-priority but
         * otherwise identical flow match, then we need to consider those for
         * soft or hard matches.
         *
         * The next iteration of the soft match loop will delete any null
         * pointers we put into 'soft' (and some others too). */
        for (int i = 0; i < n_soft; i++) {
            if (soft[i]->priority != soft_pri) {
                continue;
            }

            /* Find next-lower-priority flow with identical flow match. */
            match = next_visible_rule_in_list(soft[i]->match, version);
            if (match) {
                soft[i] = ovsrcu_get(struct cls_conjunction_set *,
                                     &match->conj_set);
                if (!soft[i]) {
                    /* The flow is a hard match; don't treat as a soft
                     * match. */
                    if (match->priority > hard_pri) {
                        hard = match;
                        hard_pri = hard->priority;
                    }
                }
            } else {
                /* No such lower-priority flow (probably the common case). */
                soft[i] = NULL;
            }
        }
    }

    if (soft != soft_stub) {
        free(soft);
    }
    return hard ? hard->cls_rule : NULL;
}

/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'
 * and that is visible in 'version'.  Returns a null pointer if no rules in
 * 'cls' match 'flow'.  If multiple rules of equal priority match 'flow',
 * returns one arbitrarily.
 *
 * If a rule is found and 'wc' is non-null, bitwise-OR's 'wc' with the
 * set of bits that were significant in the lookup.  At some point
 * earlier, 'wc' should have been initialized (e.g., by
 * flow_wildcards_init_catchall()).
 *
 * 'flow' is non-const to allow for temporary modifications during the lookup.
 * Any changes are restored before returning. */
const struct cls_rule *
classifier_lookup(const struct classifier *cls, cls_version_t version,
                  struct flow *flow, struct flow_wildcards *wc)
{
    return classifier_lookup__(cls, version, flow, wc, true);
}
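
/* Example (illustrative sketch): a lookup that also accumulates the bits a
 * datapath megaflow would need to match on:
 *
 *     struct flow_wildcards wc;
 *     const struct cls_rule *rule;
 *
 *     flow_wildcards_init_catchall(&wc);
 *     rule = classifier_lookup(&cls, version, &flow, &wc);
 *
 * After a successful lookup, 'wc' covers every bit the result depended
 * on. */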

/* Finds and returns a rule in 'cls' with exactly the same priority and
 * matching criteria as 'target', and that is visible in 'version'.
 * Only one such rule may ever exist.  Returns a null pointer if 'cls' doesn't
 * contain an exact match. */
const struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
                             const struct cls_rule *target,
                             cls_version_t version)
{
    const struct cls_match *head, *rule;
    const struct cls_subtable *subtable;

    subtable = find_subtable(cls, target->match.mask);
    if (!subtable) {
        return NULL;
    }

    head = find_equal(subtable, target->match.flow,
                      miniflow_hash_in_minimask(target->match.flow,
                                                target->match.mask, 0));
    if (!head) {
        return NULL;
    }
    CLS_MATCH_FOR_EACH (rule, head) {
        if (rule->priority < target->priority) {
            break; /* Not found. */
        }
        if (rule->priority == target->priority
            && cls_match_visible_in_version(rule, version)) {
            return rule->cls_rule;
        }
    }
    return NULL;
}

/* Finds and returns a rule in 'cls' with priority 'priority' and exactly the
 * same matching criteria as 'target', and that is visible in 'version'.
 * Returns a null pointer if 'cls' doesn't contain an exact match visible in
 * 'version'. */
const struct cls_rule *
classifier_find_match_exactly(const struct classifier *cls,
                              const struct match *target, int priority,
                              cls_version_t version)
{
    const struct cls_rule *retval;
    struct cls_rule cr;

    cls_rule_init(&cr, target, priority);
    retval = classifier_find_rule_exactly(cls, &cr, version);
    cls_rule_destroy(&cr);

    return retval;
}

/* Checks if 'target' would overlap any other rule in 'cls' in 'version'.  Two
 * rules are considered to overlap if both rules have the same priority and a
 * packet could match both, and if both rules are visible in the same version.
 *
 * A trivial example of overlapping rules is two rules matching disjoint sets
 * of fields.  E.g., if one rule matches only on port number, while another
 * only on dl_type, any packet from that specific port and with that specific
 * dl_type could match both, if the rules also have the same priority. */
bool
classifier_rule_overlaps(const struct classifier *cls,
                         const struct cls_rule *target, cls_version_t version)
{
    struct cls_subtable *subtable;

    /* Iterate subtables in the descending max priority order. */
    PVECTOR_FOR_EACH_PRIORITY (subtable, target->priority - 1, 2,
                               sizeof(struct cls_subtable), &cls->subtables) {
        struct {
            struct minimask mask;
            uint64_t storage[FLOW_U64S];
        } m;
        const struct cls_rule *rule;

        minimask_combine(&m.mask, target->match.mask, &subtable->mask,
                         m.storage);

        RCULIST_FOR_EACH (rule, node, &subtable->rules_list) {
            if (rule->priority == target->priority
                && miniflow_equal_in_minimask(target->match.flow,
                                              rule->match.flow, &m.mask)
                && cls_match_visible_in_version(rule->cls_match, version)) {
                return true;
            }
        }
    }
    return false;
}

/* Returns true if 'rule' exactly matches 'criteria' or if 'rule' is more
 * specific than 'criteria'.  That is, 'rule' matches 'criteria' and this
 * function returns true if, for every field:
 *
 *   - 'criteria' and 'rule' specify the same (non-wildcarded) value for the
 *     field, or
 *
 *   - 'criteria' wildcards the field.
 *
 * Conversely, 'rule' does not match 'criteria' and this function returns
 * false if, for at least one field:
 *
 *   - 'criteria' and 'rule' specify different values for the field, or
 *
 *   - 'criteria' specifies a value for the field but 'rule' wildcards it.
 *
 * Equivalently, the truth table for whether a field matches is:
 *
 *                                     rule
 *
 *                   c         wildcard    exact
 *                   r        +---------+---------+
 *                   i   wild |   yes   |   yes   |
 *                   t   card |         |         |
 *                   e        +---------+---------+
 *                   r  exact |    no   |if values|
 *                   i        |         |are equal|
 *                   a        +---------+---------+
 *
 * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
 * commands and by OpenFlow 1.0 aggregate and flow stats.
 *
 * Ignores rule->priority. */
bool
cls_rule_is_loose_match(const struct cls_rule *rule,
                        const struct minimatch *criteria)
{
    return (!minimask_has_extra(rule->match.mask, criteria->mask)
            && miniflow_equal_in_minimask(rule->match.flow, criteria->flow,
                                          criteria->mask));
}
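
/* Example (hypothetical flows): a rule matching "tcp, nw_dst=10.0.0.1" is a
 * loose match for criteria "tcp", since the criteria wildcard nw_dst.  It is
 * not a loose match for criteria "tcp, nw_src=10.0.0.2", since the criteria
 * specify nw_src while the rule wildcards it. */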
\f
/* Iteration. */

static bool
rule_matches(const struct cls_rule *rule, const struct cls_rule *target,
             cls_version_t version)
{
    /* Rule may only match a target if it is visible in target's version. */
    return cls_match_visible_in_version(rule->cls_match, version)
        && (!target || miniflow_equal_in_minimask(rule->match.flow,
                                                  target->match.flow,
                                                  target->match.mask));
}

static const struct cls_rule *
search_subtable(const struct cls_subtable *subtable,
                struct cls_cursor *cursor)
{
    if (!cursor->target
        || !minimask_has_extra(&subtable->mask, cursor->target->match.mask)) {
        const struct cls_rule *rule;

        RCULIST_FOR_EACH (rule, node, &subtable->rules_list) {
            if (rule_matches(rule, cursor->target, cursor->version)) {
                return rule;
            }
        }
    }
    return NULL;
}

/* Initializes 'cursor' for iterating through rules in 'cls', and returns the
 * cursor.
 *
 *     - If 'target' is null, or if 'target' is a catchall target, the
 *       cursor will visit every rule in 'cls' that is visible in 'version'.
 *
 *     - If 'target' is nonnull, the cursor will visit each 'rule' in 'cls'
 *       such that cls_rule_is_loose_match(rule, target) returns true and
 *       the rule is visible in 'version'.
 *
 * Ignores target->priority. */
struct cls_cursor
cls_cursor_start(const struct classifier *cls, const struct cls_rule *target,
                 cls_version_t version)
{
    struct cls_cursor cursor;
    struct cls_subtable *subtable;

    cursor.cls = cls;
    cursor.target = target && !cls_rule_is_catchall(target) ? target : NULL;
    cursor.version = version;
    cursor.rule = NULL;

    /* Find first rule. */
    PVECTOR_CURSOR_FOR_EACH (subtable, &cursor.subtables,
                             &cursor.cls->subtables) {
        const struct cls_rule *rule = search_subtable(subtable, &cursor);

        if (rule) {
            cursor.subtable = subtable;
            cursor.rule = rule;
            break;
        }
    }

    return cursor;
}

static const struct cls_rule *
cls_cursor_next(struct cls_cursor *cursor)
{
    const struct cls_rule *rule;
    const struct cls_subtable *subtable;

    rule = cursor->rule;
    subtable = cursor->subtable;
    RCULIST_FOR_EACH_CONTINUE (rule, node, &subtable->rules_list) {
        if (rule_matches(rule, cursor->target, cursor->version)) {
            return rule;
        }
    }

    PVECTOR_CURSOR_FOR_EACH_CONTINUE (subtable, &cursor->subtables) {
        rule = search_subtable(subtable, cursor);
        if (rule) {
            cursor->subtable = subtable;
            return rule;
        }
    }

    return NULL;
}

/* Sets 'cursor->rule' to the next matching cls_rule in 'cursor''s iteration,
 * or to null if all matching rules have been visited. */
void
cls_cursor_advance(struct cls_cursor *cursor)
{
    cursor->rule = cls_cursor_next(cursor);
}
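
/* A minimal iteration sketch (illustrative only; the CLS_FOR_EACH* macros in
 * classifier.h wrap this same pattern): counts the rules in 'cls' that are
 * visible in 'version' and are loose matches for 'target', or all visible
 * rules if 'target' is NULL. */
static int OVS_UNUSED
count_matching_rules(const struct classifier *cls,
                     const struct cls_rule *target, cls_version_t version)
{
    struct cls_cursor cursor;
    int n = 0;

    for (cursor = cls_cursor_start(cls, target, version); cursor.rule;
         cls_cursor_advance(&cursor)) {
        n++;
    }
    return n;
}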
\f
static struct cls_subtable *
find_subtable(const struct classifier *cls, const struct minimask *mask)
{
    struct cls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, minimask_hash(mask, 0),
                             &cls->subtables_map) {
        if (minimask_equal(mask, &subtable->mask)) {
            return subtable;
        }
    }
    return NULL;
}

/* The new subtable will be visible to readers only after this. */
static struct cls_subtable *
insert_subtable(struct classifier *cls, const struct minimask *mask)
{
    uint32_t hash = minimask_hash(mask, 0);
    struct cls_subtable *subtable;
    int i, index = 0;
    struct flow_wildcards old, new;
    uint8_t prev;
    int count = count_1bits(mask->masks.map);

    subtable = xzalloc(sizeof *subtable + MINIFLOW_VALUES_SIZE(count));
    cmap_init(&subtable->rules);
    miniflow_clone(CONST_CAST(struct miniflow *, &subtable->mask.masks),
                   &mask->masks, count);

    /* Init indices for segmented lookup, if any. */
    flow_wildcards_init_catchall(&new);
    old = new;
    prev = 0;
    for (i = 0; i < cls->n_flow_segments; i++) {
        flow_wildcards_fold_minimask_range(&new, mask, prev,
                                           cls->flow_segments[i]);
        /* Add an index if it adds mask bits. */
        if (!flow_wildcards_equal(&new, &old)) {
            cmap_init(&subtable->indices[index]);
            *CONST_CAST(uint8_t *, &subtable->index_ofs[index])
                = cls->flow_segments[i];
            index++;
            old = new;
        }
        prev = cls->flow_segments[i];
    }
    /* Check if the rest of the subtable's mask adds any bits,
     * and remove the last index if it doesn't. */
    if (index > 0) {
        flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U64S);
        if (flow_wildcards_equal(&new, &old)) {
            --index;
            *CONST_CAST(uint8_t *, &subtable->index_ofs[index]) = 0;
            cmap_destroy(&subtable->indices[index]);
        }
    }
    *CONST_CAST(uint8_t *, &subtable->n_indices) = index;

    *CONST_CAST(tag_type *, &subtable->tag) =
        (minimask_get_metadata_mask(mask) == OVS_BE64_MAX
         ? tag_create_deterministic(hash)
         : TAG_ALL);

    for (i = 0; i < cls->n_tries; i++) {
        subtable->trie_plen[i] = minimask_get_prefix_len(mask,
                                                         cls->tries[i].field);
    }

    /* Ports trie. */
    ovsrcu_set_hidden(&subtable->ports_trie, NULL);
    *CONST_CAST(int *, &subtable->ports_mask_len)
        = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));

    /* List of rules. */
    rculist_init(&subtable->rules_list);

    cmap_insert(&cls->subtables_map, &subtable->cmap_node, hash);

    return subtable;
}

/* RCU readers may still access the subtable before it is actually freed. */
static void
destroy_subtable(struct classifier *cls, struct cls_subtable *subtable)
{
    int i;

    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                minimask_hash(&subtable->mask, 0));

    ovs_assert(ovsrcu_get_protected(struct trie_node *, &subtable->ports_trie)
               == NULL);
    ovs_assert(cmap_is_empty(&subtable->rules));
    ovs_assert(rculist_is_empty(&subtable->rules_list));

    for (i = 0; i < subtable->n_indices; i++) {
        cmap_destroy(&subtable->indices[i]);
    }
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}

struct range {
    uint8_t start;
    uint8_t end;
};

static unsigned int be_get_bit_at(const ovs_be32 value[], unsigned int ofs);

/* Return 'true' if the rest of the subtable can be skipped based on the
 * prefix trie lookup results. */
static inline bool
check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
            const unsigned int field_plen[CLS_MAX_TRIES],
            const struct range ofs, const struct flow *flow,
            struct flow_wildcards *wc)
{
    int j;

    /* Check if we could avoid fully unwildcarding the next level of
     * fields using the prefix tries.  The trie checks are done only as
     * needed to avoid folding in additional bits to the wildcards mask. */
    for (j = 0; j < n_tries; j++) {
        /* Is the trie field relevant for this subtable? */
        if (field_plen[j]) {
            struct trie_ctx *ctx = &trie_ctx[j];
            uint8_t be32ofs = ctx->be32ofs;
            uint8_t be64ofs = be32ofs / 2;

            /* Is the trie field within the current range of fields? */
            if (be64ofs >= ofs.start && be64ofs < ofs.end) {
                /* On-demand trie lookup. */
                if (!ctx->lookup_done) {
                    memset(&ctx->match_plens, 0, sizeof ctx->match_plens);
                    ctx->maskbits = trie_lookup(ctx->trie, flow,
                                                &ctx->match_plens);
                    ctx->lookup_done = true;
                }
                /* Possible to skip the rest of the subtable if subtable's
                 * prefix on the field is not included in the lookup result. */
                if (!be_get_bit_at(&ctx->match_plens.be32, field_plen[j] - 1)) {
                    /* We want the trie lookup to never result in unwildcarding
                     * any bits that would not be unwildcarded otherwise.
                     * Since the trie is shared by the whole classifier, it is
                     * possible that the 'maskbits' contain bits that are
                     * irrelevant for the partition relevant for the current
                     * packet.  Hence the checks below. */

                    /* Check that the trie result will not unwildcard more bits
                     * than this subtable would otherwise. */
                    if (ctx->maskbits <= field_plen[j]) {
                        /* Unwildcard the bits and skip the rest. */
                        mask_set_prefix_bits(wc, be32ofs, ctx->maskbits);
                        /* Note: Prerequisite already unwildcarded, as the only
                         * prerequisite of the supported trie lookup fields is
                         * the ethertype, which is always unwildcarded. */
                        return true;
                    }
                    /* Can skip if the field is already unwildcarded. */
                    if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) {
                        return true;
                    }
                }
            }
        }
    }
    return false;
}

/* Returns true if 'target' satisfies 'flow'/'mask', that is, if for each
 * bit set in 'mask', 'target' has the same value as 'flow'.
 *
 * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
 * target, mask) but is faster because of the invariant that flow->map and
 * mask->masks.map are the same. */
static inline bool
miniflow_and_mask_matches_flow(const struct miniflow *flow,
                               const struct minimask *mask,
                               const struct flow *target)
{
    const uint64_t *flowp = miniflow_get_values(flow);
    const uint64_t *maskp = miniflow_get_values(&mask->masks);
    int idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        uint64_t diff = (*flowp++ ^ flow_u64_value(target, idx)) & *maskp++;

        if (diff) {
            return false;
        }
    }

    return true;
}

static inline const struct cls_match *
find_match(const struct cls_subtable *subtable, cls_version_t version,
           const struct flow *flow, uint32_t hash)
{
    const struct cls_match *head, *rule;

    CMAP_FOR_EACH_WITH_HASH (head, cmap_node, hash, &subtable->rules) {
        if (OVS_LIKELY(miniflow_and_mask_matches_flow(&head->flow,
                                                      &subtable->mask,
                                                      flow))) {
            /* Return highest priority rule that is visible. */
            CLS_MATCH_FOR_EACH (rule, head) {
                if (OVS_LIKELY(cls_match_visible_in_version(rule, version))) {
                    return rule;
                }
            }
        }
    }

    return NULL;
}

/* Returns true if 'target' satisfies 'flow'/'mask', that is, if for each
 * bit set in 'mask', 'target' has the same value as 'flow'.
 *
 * This function is equivalent to miniflow_and_mask_matches_flow() but this
 * version fills in the mask bits in 'wc'. */
static inline bool
miniflow_and_mask_matches_flow_wc(const struct miniflow *flow,
                                  const struct minimask *mask,
                                  const struct flow *target,
                                  struct flow_wildcards *wc)
{
    const uint64_t *flowp = miniflow_get_values(flow);
    const uint64_t *maskp = miniflow_get_values(&mask->masks);
    int idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        uint64_t mask = *maskp++;
        uint64_t diff = (*flowp++ ^ flow_u64_value(target, idx)) & mask;

        if (diff) {
            /* Only unwildcard if none of the differing bits is already
             * exact-matched. */
            if (!(flow_u64_value(&wc->masks, idx) & diff)) {
                /* Keep one bit of the difference.  The selected bit may be
                 * different in big-endian vs. little-endian systems. */
                *flow_u64_lvalue(&wc->masks, idx) |= rightmost_1bit(diff);
            }
            return false;
        }
        /* Fill in the bits that were looked at. */
        *flow_u64_lvalue(&wc->masks, idx) |= mask;
    }

    return true;
}
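
/* Example of the single-bit unwildcarding above (values hypothetical): if a
 * masked u64 of 'target' differs from 'flow' in bits 3 and 17, then
 * diff == 0x20008 and rightmost_1bit(diff) == 0x8, so only bit 3 is added to
 * 'wc'.  Revealing one differing bit keeps the wildcards minimal while still
 * being sufficient to distinguish packets that miss this subtable. */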

/* Unwildcard the fields looked up so far, if any. */
static void
fill_range_wc(const struct cls_subtable *subtable, struct flow_wildcards *wc,
              uint8_t to)
{
    if (to) {
        flow_wildcards_fold_minimask_range(wc, &subtable->mask, 0, to);
    }
}

static const struct cls_match *
find_match_wc(const struct cls_subtable *subtable, cls_version_t version,
              const struct flow *flow, struct trie_ctx trie_ctx[CLS_MAX_TRIES],
              unsigned int n_tries, struct flow_wildcards *wc)
{
    uint32_t basis = 0, hash;
    const struct cls_match *rule = NULL;
    int i;
    struct range ofs;

    if (OVS_UNLIKELY(!wc)) {
        return find_match(subtable, version, flow,
                          flow_hash_in_minimask(flow, &subtable->mask, 0));
    }

    ofs.start = 0;
    /* Try to finish early by checking fields in segments. */
    for (i = 0; i < subtable->n_indices; i++) {
        const struct cmap_node *inode;

        ofs.end = subtable->index_ofs[i];

        if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow,
                        wc)) {
            /* 'wc' bits for the trie field set, now unwildcard the preceding
             * bits used so far. */
            fill_range_wc(subtable, wc, ofs.start);
            return NULL;
        }
        hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
                                           ofs.end, &basis);
        inode = cmap_find(&subtable->indices[i], hash);
        if (!inode) {
            /* No match, can stop immediately, but must fold in the bits
             * used in lookup so far. */
            fill_range_wc(subtable, wc, ofs.end);
            return NULL;
        }

        /* If we have narrowed down to a single rule already, check whether
         * that rule matches.  Either way, we're done.
         *
         * (Rare) hash collisions may cause us to miss the opportunity for this
         * optimization. */
        if (!cmap_node_next(inode)) {
            const struct cls_match *head;

            ASSIGN_CONTAINER(head, inode - i, index_nodes);
            if (miniflow_and_mask_matches_flow_wc(&head->flow, &subtable->mask,
                                                  flow, wc)) {
                /* Return highest priority rule that is visible. */
                CLS_MATCH_FOR_EACH (rule, head) {
                    if (OVS_LIKELY(cls_match_visible_in_version(rule,
                                                                version))) {
                        return rule;
                    }
                }
            }
            return NULL;
        }
        ofs.start = ofs.end;
    }
    ofs.end = FLOW_U64S;
    /* Trie check for the final range. */
    if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow, wc)) {
        fill_range_wc(subtable, wc, ofs.start);
        return NULL;
    }
    hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
                                       ofs.end, &basis);
    rule = find_match(subtable, version, flow, hash);
    if (!rule && subtable->ports_mask_len) {
        /* Ports are always part of the final range, if any.
         * No match was found for the ports.  Use the ports trie to figure out
         * which ports bits to unwildcard. */
        unsigned int mbits;
        ovs_be32 value, plens, mask;

        mask = MINIFLOW_GET_BE32(&subtable->mask.masks, tp_src);
        value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
        mbits = trie_lookup_value(&subtable->ports_trie, &value, &plens, 32);

        ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
            mask & be32_prefix_mask(mbits);

        /* Unwildcard all bits in the mask up to the ports, as they were used
         * to determine there is no match. */
        fill_range_wc(subtable, wc, TP_PORTS_OFS64);
        return NULL;
    }

    /* Must unwildcard all the fields, as they were looked at. */
    flow_wildcards_fold_minimask(wc, &subtable->mask);
    return rule;
}

static struct cls_match *
find_equal(const struct cls_subtable *subtable, const struct miniflow *flow,
           uint32_t hash)
{
    struct cls_match *head;

    CMAP_FOR_EACH_WITH_HASH (head, cmap_node, hash, &subtable->rules) {
        if (miniflow_equal(&head->flow, flow)) {
            return head;
        }
    }
    return NULL;
}
\f
/* A longest-prefix match tree. */

/* Return at least 'plen' bits of the 'prefix', starting at bit offset 'ofs'.
 * Prefixes are in the network byte order, and the offset 0 corresponds to
 * the most significant bit of the first byte.  The offset can be read as
 * "how many bits to skip from the start of the prefix starting at 'pr'". */
static uint32_t
raw_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    uint32_t prefix;

    pr += ofs / 32; /* Where to start. */
    ofs %= 32; /* How many bits to skip at 'pr'. */

    prefix = ntohl(*pr) << ofs; /* Get the first 32 - ofs bits. */
    if (plen > 32 - ofs) { /* Need more than we have already? */
        prefix |= ntohl(*++pr) >> (32 - ofs);
    }
    /* Return with possible unwanted bits at the end. */
    return prefix;
}

/* Return min(TRIE_PREFIX_BITS, plen) bits of the 'prefix', starting at bit
 * offset 'ofs'.  Prefixes are in the network byte order, and the offset 0
 * corresponds to the most significant bit of the first byte.  The offset can
 * be read as "how many bits to skip from the start of the prefix starting at
 * 'pr'". */
static uint32_t
trie_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    if (!plen) {
        return 0;
    }
    if (plen > TRIE_PREFIX_BITS) {
        plen = TRIE_PREFIX_BITS; /* Get at most TRIE_PREFIX_BITS. */
    }
    /* Return with unwanted bits cleared. */
    return raw_get_prefix(pr, ofs, plen) & ~0u << (32 - plen);
}
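
/* A worked example of the two helpers above (values hypothetical): with
 * pr[] = { htonl(0x12345678), htonl(0x9abcdef0) }, raw_get_prefix(pr, 16, 24)
 * returns 0x56789abc, the 24 requested bits followed by 8 unwanted trailing
 * bits, while trie_get_prefix(pr, 16, 24) clears the tail and returns
 * 0x56789a00. */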

/* Return the number of equal bits in 'n_bits' of 'prefix's MSBs and a 'value'
 * starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
prefix_equal_bits(uint32_t prefix, unsigned int n_bits, const ovs_be32 value[],
                  unsigned int ofs)
{
    uint64_t diff = prefix ^ raw_get_prefix(value, ofs, n_bits);
    /* Set the bit after the relevant bits to limit the result. */
    return raw_clz64(diff << 32 | UINT64_C(1) << (63 - n_bits));
}

/* Return the number of equal bits in 'node' prefix and a 'prefix' of length
 * 'plen', starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
trie_prefix_equal_bits(const struct trie_node *node, const ovs_be32 prefix[],
                       unsigned int ofs, unsigned int plen)
{
    return prefix_equal_bits(node->prefix, MIN(node->n_bits, plen - ofs),
                             prefix, ofs);
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' can
 * be greater than 31. */
static unsigned int
be_get_bit_at(const ovs_be32 value[], unsigned int ofs)
{
    return (((const uint8_t *)value)[ofs / 8] >> (7 - ofs % 8)) & 1u;
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' must
 * be between 0 and 31, inclusive. */
static unsigned int
get_bit_at(const uint32_t prefix, unsigned int ofs)
{
    return (prefix >> (31 - ofs)) & 1u;
}

/* Create new branch. */
static struct trie_node *
trie_branch_create(const ovs_be32 *prefix, unsigned int ofs, unsigned int plen,
                   unsigned int n_rules)
{
    struct trie_node *node = xmalloc(sizeof *node);

    node->prefix = trie_get_prefix(prefix, ofs, plen);

    if (plen <= TRIE_PREFIX_BITS) {
        node->n_bits = plen;
        ovsrcu_set_hidden(&node->edges[0], NULL);
        ovsrcu_set_hidden(&node->edges[1], NULL);
        node->n_rules = n_rules;
    } else { /* Need intermediate nodes. */
        struct trie_node *subnode = trie_branch_create(prefix,
                                                       ofs + TRIE_PREFIX_BITS,
                                                       plen - TRIE_PREFIX_BITS,
                                                       n_rules);
        int bit = get_bit_at(subnode->prefix, 0);
        node->n_bits = TRIE_PREFIX_BITS;
        ovsrcu_set_hidden(&node->edges[bit], subnode);
        ovsrcu_set_hidden(&node->edges[!bit], NULL);
        node->n_rules = 0;
    }
    return node;
}

static void
trie_node_destroy(const struct trie_node *node)
{
    ovsrcu_postpone(free, CONST_CAST(struct trie_node *, node));
}

/* Copy a trie node for modification and postpone deletion of the old one. */
static struct trie_node *
trie_node_rcu_realloc(const struct trie_node *node)
{
    struct trie_node *new_node = xmalloc(sizeof *node);

    *new_node = *node;
    trie_node_destroy(node);

    return new_node;
}

static void
trie_destroy(rcu_trie_ptr *trie)
{
    struct trie_node *node = ovsrcu_get_protected(struct trie_node *, trie);

    if (node) {
        ovsrcu_set_hidden(trie, NULL);
        trie_destroy(&node->edges[0]);
        trie_destroy(&node->edges[1]);
        trie_node_destroy(node);
    }
}

static bool
trie_is_leaf(const struct trie_node *trie)
{
    /* No children? */
    return !ovsrcu_get(struct trie_node *, &trie->edges[0])
        && !ovsrcu_get(struct trie_node *, &trie->edges[1]);
}

static void
mask_set_prefix_bits(struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;

    for (i = 0; i < n_bits / 32; i++) {
        mask[i] = OVS_BE32_MAX;
    }
    if (n_bits % 32) {
        mask[i] |= htonl(~0u << (32 - n_bits % 32));
    }
}

static bool
mask_prefix_bits_set(const struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;
    ovs_be32 zeroes = 0;

    for (i = 0; i < n_bits / 32; i++) {
        zeroes |= ~mask[i];
    }
    if (n_bits % 32) {
        zeroes |= ~mask[i] & htonl(~0u << (32 - n_bits % 32));
    }

    return !zeroes; /* All 'n_bits' bits set. */
}
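
/* For example (values hypothetical), mask_set_prefix_bits(wc, ofs, 20) ORs
 * htonl(0xfffff000) into the first 32-bit word at 'ofs'; a subsequent
 * mask_prefix_bits_set(wc, ofs, 20) then returns true, since all 20 prefix
 * bits are now set in 'wc'. */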

static rcu_trie_ptr *
trie_next_edge(struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return node->edges + be_get_bit_at(value, ofs);
}

static const struct trie_node *
trie_next_node(const struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return ovsrcu_get(struct trie_node *,
                      &node->edges[be_get_bit_at(value, ofs)]);
}

/* Set the bit at ("MSB 0"-based) offset 'ofs'.  'ofs' can be greater than 31.
 */
static void
be_set_bit_at(ovs_be32 value[], unsigned int ofs)
{
    ((uint8_t *)value)[ofs / 8] |= 1u << (7 - ofs % 8);
}

/* Returns the number of bits in the prefix mask necessary to determine a
 * mismatch, in case there are longer prefixes in the tree below the one that
 * matched.
 * '*plens' will have a bit set for each prefix length that may have matching
 * rules.  The caller is responsible for clearing the '*plens' prior to
 * calling this.
 */
static unsigned int
trie_lookup_value(const rcu_trie_ptr *trie, const ovs_be32 value[],
                  ovs_be32 plens[], unsigned int n_bits)
{
    const struct trie_node *prev = NULL;
    const struct trie_node *node = ovsrcu_get(struct trie_node *, trie);
    unsigned int match_len = 0; /* Number of matching bits. */

    for (; node; prev = node, node = trie_next_node(node, value, match_len)) {
        unsigned int eqbits;
        /* Check if this edge can be followed. */
        eqbits = prefix_equal_bits(node->prefix, node->n_bits, value,
                                   match_len);
        match_len += eqbits;
        if (eqbits < node->n_bits) { /* Mismatch, nothing more to be found. */
            /* Bit at offset 'match_len' differed. */
            return match_len + 1; /* Includes the first mismatching bit. */
        }
        /* Full match, check if rules exist at this prefix length. */
        if (node->n_rules > 0) {
            be_set_bit_at(plens, match_len - 1);
        }
        if (match_len >= n_bits) {
            return n_bits; /* Full prefix. */
        }
    }
    /* node == NULL.  Full match so far, but we tried to follow a
     * non-existent branch.  Need to exclude the other branch if it exists
     * (it does not if we were called on an empty trie or 'prev' is a leaf
     * node). */
    return !prev || trie_is_leaf(prev) ? match_len : match_len + 1;
}

static unsigned int
trie_lookup(const struct cls_trie *trie, const struct flow *flow,
            union mf_value *plens)
{
    const struct mf_field *mf = trie->field;

    /* Check that current flow matches the prerequisites for the trie
     * field.  Some match fields are used for multiple purposes, so we
     * must check that the trie is relevant for this flow. */
    if (mf_are_prereqs_ok(mf, flow)) {
        return trie_lookup_value(&trie->root,
                                 &((ovs_be32 *)flow)[mf->flow_be32ofs],
                                 &plens->be32, mf->n_bits);
    }
    memset(plens, 0xff, sizeof *plens); /* All prefixes, no skipping. */
    return 0; /* Value not used in this case. */
}

/* Returns the length of a prefix match mask for the field 'mf' in 'minimask'.
 * Returns 0 if the mask is not a contiguous CIDR prefix. */
static unsigned int
minimask_get_prefix_len(const struct minimask *minimask,
                        const struct mf_field *mf)
{
    unsigned int n_bits = 0, mask_tz = 0; /* Non-zero when end of mask seen. */
    uint8_t be32_ofs = mf->flow_be32ofs;
    uint8_t be32_end = be32_ofs + mf->n_bytes / 4;

    for (; be32_ofs < be32_end; ++be32_ofs) {
        uint32_t mask = ntohl(minimask_get_be32(minimask, be32_ofs));

        /* Validate mask, count the mask length. */
        if (mask_tz) {
            if (mask) {
                return 0; /* No bits allowed after mask ended. */
            }
        } else {
            if (~mask & (~mask + 1)) {
                return 0; /* Mask not contiguous. */
            }
            mask_tz = ctz32(mask);
            n_bits += 32 - mask_tz;
        }
    }

    return n_bits;
}
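
/* The contiguity test above in a worked example: for mask 0xffffff00,
 * ~mask + 1 == 0x100 and (~mask & (~mask + 1)) == 0, so the mask is a valid
 * CIDR prefix and n_bits becomes 32 - ctz32(mask) == 24.  For a
 * non-contiguous mask such as 0xff00ff00, ~mask & (~mask + 1) == 0x00ff0000,
 * which is nonzero, so the function returns 0. */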

/*
 * This is called only when mask prefix is known to be CIDR and non-zero.
 * Relies on the fact that the flow and mask have the same map, and since
 * the mask is CIDR, the storage for the flow field exists even if it
 * happened to be zeros.
 */
static const ovs_be32 *
minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
{
    return (OVS_FORCE const ovs_be32 *)
        (miniflow_get_values(match->flow)
         + count_1bits(match->flow->map &
                       ((UINT64_C(1) << mf->flow_be32ofs / 2) - 1)))
        + (mf->flow_be32ofs & 1);
}

/* Insert rule into the prefix trie.
 * 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_insert_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

static void
trie_insert_prefix(rcu_trie_ptr *edge, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    int ofs = 0;

    /* Walk the tree. */
    for (; (node = ovsrcu_get_protected(struct trie_node *, edge));
         edge = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
        ofs += eqbits;
        if (eqbits < node->n_bits) {
            /* Mismatch, new node needs to be inserted above. */
            int old_branch = get_bit_at(node->prefix, eqbits);
            struct trie_node *new_parent;

            new_parent = trie_branch_create(prefix, ofs - eqbits, eqbits,
                                            ofs == mlen ? 1 : 0);
            /* Copy the node to modify it. */
            node = trie_node_rcu_realloc(node);
            /* Adjust the new node for its new position in the tree. */
            node->prefix <<= eqbits;
            node->n_bits -= eqbits;
            ovsrcu_set_hidden(&new_parent->edges[old_branch], node);

            /* Check if need a new branch for the new rule. */
            if (ofs < mlen) {
                ovsrcu_set_hidden(&new_parent->edges[!old_branch],
                                  trie_branch_create(prefix, ofs, mlen - ofs,
                                                     1));
            }
            ovsrcu_set(edge, new_parent); /* Publish changes. */
            return;
        }
        /* Full match so far. */

        if (ofs == mlen) {
            /* Full match at the current node, rule needs to be added here. */
            node->n_rules++;
            return;
        }
    }
    /* Must insert a new tree branch for the new rule. */
    ovsrcu_set(edge, trie_branch_create(prefix, ofs, mlen - ofs, 1));
}
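
/* An illustrative sketch (not part of the classifier; names hypothetical) of
 * how the trie primitives above compose: insert 10.0.0.0/8 and 10.1.0.0/16,
 * then look up 10.1.255.1.  The lookup sets bits 7 and 15 ("MSB 0"-based,
 * i.e. prefix lengths 8 and 16) in 'plens' and returns 16, the number of
 * bits needed to determine a possible mismatch. */
static void OVS_UNUSED
trie_example(void)
{
    rcu_trie_ptr root;
    const ovs_be32 p8 = htonl(0x0a000000);    /* 10.0.0.0/8 */
    const ovs_be32 p16 = htonl(0x0a010000);   /* 10.1.0.0/16 */
    const ovs_be32 value = htonl(0x0a01ff01); /* 10.1.255.1 */
    ovs_be32 plens = 0;                       /* Caller must clear. */
    unsigned int mbits;

    ovsrcu_set_hidden(&root, NULL);
    trie_insert_prefix(&root, &p8, 8);
    trie_insert_prefix(&root, &p16, 16);

    mbits = trie_lookup_value(&root, &value, &plens, 32);
    ovs_assert(mbits == 16);
    ovs_assert(be_get_bit_at(&plens, 8 - 1));   /* /8 may match. */
    ovs_assert(be_get_bit_at(&plens, 16 - 1));  /* /16 may match. */

    trie_destroy(&root);
}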

/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_remove_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove_prefix(rcu_trie_ptr *root, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    rcu_trie_ptr *edges[sizeof(union mf_value) * 8];
    int depth = 0, ofs = 0;

    /* Walk the tree. */
    for (edges[0] = root;
         (node = ovsrcu_get_protected(struct trie_node *, edges[depth]));
         edges[++depth] = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);

        if (eqbits < node->n_bits) {
            /* Mismatch, nothing to be removed.  This should never happen, as
             * only rules in the classifier are ever removed. */
            break; /* Log a warning. */
        }
        /* Full match so far. */
        ofs += eqbits;

        if (ofs == mlen) {
            /* Full prefix match at the current node, remove rule here. */
            if (!node->n_rules) {
                break; /* Log a warning. */
            }
            node->n_rules--;

            /* Check if the tree can be pruned. */
            while (!node->n_rules) {
                struct trie_node *next,
                    *edge0 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[0]),
                    *edge1 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[1]);

                if (edge0 && edge1) {
                    break; /* A branching point, cannot prune. */
                }

                /* Else have at most one child node, remove this node. */
                next = edge0 ? edge0 : edge1;

                if (next) {
                    if (node->n_bits + next->n_bits > TRIE_PREFIX_BITS) {
                        break; /* Cannot combine. */
                    }
                    next = trie_node_rcu_realloc(next); /* Modify. */

                    /* Combine node with next. */
                    next->prefix = node->prefix | next->prefix >> node->n_bits;
                    next->n_bits += node->n_bits;
                }
                /* Update the parent's edge. */
                ovsrcu_set(edges[depth], next); /* Publish changes. */
                trie_node_destroy(node);

                if (next || !depth) {
                    /* Branch not pruned or at root, nothing more to do. */
                    break;
                }
                node = ovsrcu_get_protected(struct trie_node *,
                                            edges[--depth]);
            }
            return;
        }
    }
    /* Cannot go deeper.  This should never happen, since only rules
     * that actually exist in the classifier are ever removed. */
    VLOG_WARN("Trying to remove a nonexistent rule from a prefix trie.");
}
\f

#define CLS_MATCH_POISON (struct cls_match *)(UINTPTR_MAX / 0xf * 0xb)
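
/* CLS_MATCH_POISON expands to the repeating byte pattern 0xbb...bb at any
 * word size (UINTPTR_MAX / 0xf == 0x11...11, multiplied by 0xb), a
 * recognizable pointer value that should fault when dereferenced, helping to
 * catch use of a cls_match's 'next' link after it has been freed. */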

void
cls_match_free_cb(struct cls_match *rule)
{
    ovsrcu_set_hidden(&rule->next, CLS_MATCH_POISON);
    free(rule);
}