]> git.proxmox.com Git - mirror_ovs.git/blame - lib/classifier.c
tests: Fix ovs-ofctl rule with importance test case.
[mirror_ovs.git] / lib / classifier.c
CommitLineData
064af421 1/*
78c8df12 2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
064af421 3 *
a14bc59f
BP
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
064af421 7 *
a14bc59f
BP
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
064af421
BP
15 */
16
17#include <config.h>
18#include "classifier.h"
38c449e0 19#include "classifier-private.h"
064af421
BP
20#include <errno.h>
21#include <netinet/in.h>
844dff32 22#include "byte-order.h"
68d1c8c3 23#include "dynamic-string.h"
07b37e8f 24#include "odp-util.h"
d8ae4d67 25#include "ofp-util.h"
13751fd8 26#include "packets.h"
52054c15 27#include "util.h"
13751fd8
JR
28#include "vlog.h"
29
30VLOG_DEFINE_THIS_MODULE(classifier);
064af421 31
69d6040e
JR
32struct trie_ctx;
33
34/* Ports trie depends on both ports sharing the same ovs_be32. */
35#define TP_PORTS_OFS32 (offsetof(struct flow, tp_src) / 4)
36BUILD_ASSERT_DECL(TP_PORTS_OFS32 == offsetof(struct flow, tp_dst) / 4);
cabd4c43 37
627fb667
JR
38static struct cls_match *
39cls_match_alloc(struct cls_rule *rule)
40{
3016f3e4
JR
41 int count = count_1bits(rule->match.flow.map);
42
43 struct cls_match *cls_match
44 = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
45 + MINIFLOW_VALUES_SIZE(count));
627fb667 46
f80028fe
JR
47 *CONST_CAST(const struct cls_rule **, &cls_match->cls_rule) = rule;
48 *CONST_CAST(int *, &cls_match->priority) = rule->priority;
49 miniflow_clone_inline(CONST_CAST(struct miniflow *, &cls_match->flow),
50 &rule->match.flow, count);
627fb667
JR
51 rule->cls_match = cls_match;
52
53 return cls_match;
54}
cabd4c43 55
e48eccd1 56static struct cls_subtable *find_subtable(const struct classifier *cls,
dfea28b3 57 const struct minimask *);
e48eccd1 58static struct cls_subtable *insert_subtable(struct classifier *cls,
e65413ab
JR
59 const struct minimask *)
60 OVS_REQUIRES(cls->mutex);
e48eccd1 61static void destroy_subtable(struct classifier *cls, struct cls_subtable *)
e65413ab 62 OVS_REQUIRES(cls->mutex);
e48eccd1 63static struct cls_match *insert_rule(struct classifier *cls,
e65413ab
JR
64 struct cls_subtable *, struct cls_rule *)
65 OVS_REQUIRES(cls->mutex);
b5d97350 66
dfea28b3
JR
67static const struct cls_match *find_match_wc(const struct cls_subtable *,
68 const struct flow *,
69 struct trie_ctx *,
70 unsigned int n_tries,
71 struct flow_wildcards *);
72static struct cls_match *find_equal(const struct cls_subtable *,
627fb667 73 const struct miniflow *, uint32_t hash);
b5d97350 74
dfea28b3
JR
75static inline const struct cls_match *
76next_rule_in_list__(const struct cls_match *rule)
77{
78 const struct cls_match *next = NULL;
79 next = OBJECT_CONTAINING(rculist_next(&rule->list), next, list);
80 return next;
81}
82
83static inline const struct cls_match *
84next_rule_in_list(const struct cls_match *rule)
85{
86 const struct cls_match *next = next_rule_in_list__(rule);
87 return next->priority < rule->priority ? next : NULL;
88}
89
c501b427 90static inline struct cls_match *
dfea28b3 91next_rule_in_list_protected__(struct cls_match *rule)
c501b427
JR
92{
93 struct cls_match *next = NULL;
dfea28b3 94 next = OBJECT_CONTAINING(rculist_next_protected(&rule->list), next, list);
c501b427
JR
95 return next;
96}
97
98static inline struct cls_match *
dfea28b3 99next_rule_in_list_protected(struct cls_match *rule)
c501b427 100{
dfea28b3 101 struct cls_match *next = next_rule_in_list_protected__(rule);
c501b427
JR
102 return next->priority < rule->priority ? next : NULL;
103}
104
e65413ab
JR
105/* Iterates RULE over HEAD and all of the cls_rules on HEAD->list.
106 * Classifier's mutex must be held while iterating, as the list is
107 * protoceted by it. */
b5d97350
BP
108#define FOR_EACH_RULE_IN_LIST(RULE, HEAD) \
109 for ((RULE) = (HEAD); (RULE) != NULL; (RULE) = next_rule_in_list(RULE))
dfea28b3
JR
110#define FOR_EACH_RULE_IN_LIST_PROTECTED(RULE, HEAD) \
111 for ((RULE) = (HEAD); (RULE) != NULL; \
112 (RULE) = next_rule_in_list_protected(RULE))
13751fd8
JR
113
114static unsigned int minimask_get_prefix_len(const struct minimask *,
115 const struct mf_field *);
e48eccd1 116static void trie_init(struct classifier *cls, int trie_idx,
e65413ab
JR
117 const struct mf_field *)
118 OVS_REQUIRES(cls->mutex);
13751fd8 119static unsigned int trie_lookup(const struct cls_trie *, const struct flow *,
c0bfb650 120 union mf_value *plens);
f358a2cb 121static unsigned int trie_lookup_value(const rcu_trie_ptr *,
c0bfb650
JR
122 const ovs_be32 value[], ovs_be32 plens[],
123 unsigned int value_bits);
f358a2cb 124static void trie_destroy(rcu_trie_ptr *);
13751fd8 125static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen);
f358a2cb 126static void trie_insert_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
69d6040e 127 int mlen);
13751fd8 128static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen);
f358a2cb 129static void trie_remove_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
69d6040e 130 int mlen);
13751fd8 131static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs,
c30cfa6b 132 unsigned int n_bits);
13751fd8 133static bool mask_prefix_bits_set(const struct flow_wildcards *,
c30cfa6b 134 uint8_t be32ofs, unsigned int n_bits);
81a76618
BP
135\f
136/* cls_rule. */
b5d97350 137
81a76618 138/* Initializes 'rule' to match packets specified by 'match' at the given
5cb7a798
BP
139 * 'priority'. 'match' must satisfy the invariant described in the comment at
140 * the definition of struct match.
66642cb4 141 *
48d28ac1
BP
142 * The caller must eventually destroy 'rule' with cls_rule_destroy().
143 *
eb391b76
BP
144 * Clients should not use priority INT_MIN. (OpenFlow uses priorities between
145 * 0 and UINT16_MAX, inclusive.) */
47284b1f 146void
eb391b76 147cls_rule_init(struct cls_rule *rule, const struct match *match, int priority)
47284b1f 148{
5cb7a798
BP
149 minimatch_init(&rule->match, match);
150 rule->priority = priority;
627fb667 151 rule->cls_match = NULL;
5cb7a798
BP
152}
153
154/* Same as cls_rule_init() for initialization from a "struct minimatch". */
155void
156cls_rule_init_from_minimatch(struct cls_rule *rule,
eb391b76 157 const struct minimatch *match, int priority)
5cb7a798
BP
158{
159 minimatch_clone(&rule->match, match);
81a76618 160 rule->priority = priority;
627fb667 161 rule->cls_match = NULL;
685a51a5
JP
162}
163
48d28ac1
BP
164/* Initializes 'dst' as a copy of 'src'.
165 *
b2c1f00b 166 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
48d28ac1
BP
167void
168cls_rule_clone(struct cls_rule *dst, const struct cls_rule *src)
169{
5cb7a798
BP
170 minimatch_clone(&dst->match, &src->match);
171 dst->priority = src->priority;
627fb667 172 dst->cls_match = NULL;
48d28ac1
BP
173}
174
b2c1f00b
BP
175/* Initializes 'dst' with the data in 'src', destroying 'src'.
176 *
177 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
178void
179cls_rule_move(struct cls_rule *dst, struct cls_rule *src)
180{
181 minimatch_move(&dst->match, &src->match);
182 dst->priority = src->priority;
627fb667 183 dst->cls_match = NULL;
b2c1f00b
BP
184}
185
48d28ac1
BP
186/* Frees memory referenced by 'rule'. Doesn't free 'rule' itself (it's
187 * normally embedded into a larger structure).
188 *
189 * ('rule' must not currently be in a classifier.) */
190void
5cb7a798 191cls_rule_destroy(struct cls_rule *rule)
48d28ac1 192{
627fb667 193 ovs_assert(!rule->cls_match);
5cb7a798 194 minimatch_destroy(&rule->match);
48d28ac1
BP
195}
196
81a76618
BP
197/* Returns true if 'a' and 'b' match the same packets at the same priority,
198 * false if they differ in some way. */
193eb874
BP
199bool
200cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b)
201{
5cb7a798 202 return a->priority == b->priority && minimatch_equal(&a->match, &b->match);
193eb874
BP
203}
204
81a76618 205/* Returns a hash value for 'rule', folding in 'basis'. */
57452fdc
BP
206uint32_t
207cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
208{
5cb7a798 209 return minimatch_hash(&rule->match, hash_int(rule->priority, basis));
73f33563
BP
210}
211
81a76618 212/* Appends a string describing 'rule' to 's'. */
07b37e8f
BP
213void
214cls_rule_format(const struct cls_rule *rule, struct ds *s)
215{
5cb7a798 216 minimatch_format(&rule->match, s, rule->priority);
064af421 217}
3ca1de08
BP
218
219/* Returns true if 'rule' matches every packet, false otherwise. */
220bool
221cls_rule_is_catchall(const struct cls_rule *rule)
222{
5cb7a798 223 return minimask_is_catchall(&rule->match.mask);
3ca1de08 224}
064af421
BP
225\f
226/* Initializes 'cls' as a classifier that initially contains no classification
227 * rules. */
228void
e48eccd1
JR
229classifier_init(struct classifier *cls, const uint8_t *flow_segments)
230 OVS_EXCLUDED(cls->mutex)
064af421 231{
e65413ab 232 ovs_mutex_init(&cls->mutex);
e65413ab 233 ovs_mutex_lock(&cls->mutex);
064af421 234 cls->n_rules = 0;
f2c21402 235 cmap_init(&cls->subtables_map);
fe7cfa5c 236 pvector_init(&cls->subtables);
f2c21402 237 cmap_init(&cls->partitions);
476f36e8
JR
238 cls->n_flow_segments = 0;
239 if (flow_segments) {
240 while (cls->n_flow_segments < CLS_MAX_INDICES
241 && *flow_segments < FLOW_U32S) {
242 cls->flow_segments[cls->n_flow_segments++] = *flow_segments++;
243 }
244 }
13751fd8 245 cls->n_tries = 0;
e65413ab
JR
246 for (int i = 0; i < CLS_MAX_TRIES; i++) {
247 trie_init(cls, i, NULL);
248 }
249 ovs_mutex_unlock(&cls->mutex);
064af421
BP
250}
251
252/* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
afae68b1
JR
253 * caller's responsibility.
254 * May only be called after all the readers have been terminated. */
064af421 255void
e48eccd1
JR
256classifier_destroy(struct classifier *cls)
257 OVS_EXCLUDED(cls->mutex)
064af421 258{
e48eccd1 259 if (cls) {
78c8df12
BP
260 struct cls_partition *partition;
261 struct cls_subtable *subtable;
13751fd8
JR
262 int i;
263
e65413ab 264 ovs_mutex_lock(&cls->mutex);
13751fd8 265 for (i = 0; i < cls->n_tries; i++) {
f358a2cb 266 trie_destroy(&cls->tries[i].root);
13751fd8 267 }
064af421 268
6bc3bb82 269 CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
03868246 270 destroy_subtable(cls, subtable);
064af421 271 }
f2c21402 272 cmap_destroy(&cls->subtables_map);
c906cedf 273
6bc3bb82 274 CMAP_FOR_EACH (partition, cmap_node, &cls->partitions) {
f2c21402 275 ovsrcu_postpone(free, partition);
c906cedf 276 }
f2c21402 277 cmap_destroy(&cls->partitions);
cabd4c43 278
fe7cfa5c 279 pvector_destroy(&cls->subtables);
e65413ab
JR
280 ovs_mutex_unlock(&cls->mutex);
281 ovs_mutex_destroy(&cls->mutex);
064af421
BP
282 }
283}
284
13751fd8 285/* Set the fields for which prefix lookup should be performed. */
f358a2cb 286bool
e48eccd1 287classifier_set_prefix_fields(struct classifier *cls,
13751fd8
JR
288 const enum mf_field_id *trie_fields,
289 unsigned int n_fields)
e48eccd1 290 OVS_EXCLUDED(cls->mutex)
13751fd8 291{
f358a2cb 292 const struct mf_field * new_fields[CLS_MAX_TRIES];
abadfcb0 293 struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
f358a2cb
JR
294 int i, n_tries = 0;
295 bool changed = false;
13751fd8 296
e65413ab 297 ovs_mutex_lock(&cls->mutex);
f358a2cb 298 for (i = 0; i < n_fields && n_tries < CLS_MAX_TRIES; i++) {
13751fd8
JR
299 const struct mf_field *field = mf_from_id(trie_fields[i]);
300 if (field->flow_be32ofs < 0 || field->n_bits % 32) {
301 /* Incompatible field. This is the only place where we
302 * enforce these requirements, but the rest of the trie code
303 * depends on the flow_be32ofs to be non-negative and the
304 * field length to be a multiple of 32 bits. */
305 continue;
306 }
307
abadfcb0 308 if (bitmap_is_set(fields.bm, trie_fields[i])) {
13751fd8
JR
309 /* Duplicate field, there is no need to build more than
310 * one index for any one field. */
311 continue;
312 }
abadfcb0 313 bitmap_set1(fields.bm, trie_fields[i]);
13751fd8 314
f358a2cb
JR
315 new_fields[n_tries] = NULL;
316 if (n_tries >= cls->n_tries || field != cls->tries[n_tries].field) {
317 new_fields[n_tries] = field;
318 changed = true;
319 }
320 n_tries++;
321 }
322
323 if (changed || n_tries < cls->n_tries) {
324 struct cls_subtable *subtable;
325
326 /* Trie configuration needs to change. Disable trie lookups
327 * for the tries that are changing and wait all the current readers
328 * with the old configuration to be done. */
329 changed = false;
330 CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
331 for (i = 0; i < cls->n_tries; i++) {
332 if ((i < n_tries && new_fields[i]) || i >= n_tries) {
333 if (subtable->trie_plen[i]) {
334 subtable->trie_plen[i] = 0;
335 changed = true;
336 }
337 }
338 }
339 }
340 /* Synchronize if any readers were using tries. The readers may
341 * temporarily function without the trie lookup based optimizations. */
342 if (changed) {
343 /* ovsrcu_synchronize() functions as a memory barrier, so it does
344 * not matter that subtable->trie_plen is not atomic. */
345 ovsrcu_synchronize();
13751fd8 346 }
13751fd8 347
f358a2cb
JR
348 /* Now set up the tries. */
349 for (i = 0; i < n_tries; i++) {
350 if (new_fields[i]) {
351 trie_init(cls, i, new_fields[i]);
352 }
353 }
354 /* Destroy the rest, if any. */
355 for (; i < cls->n_tries; i++) {
356 trie_init(cls, i, NULL);
357 }
358
359 cls->n_tries = n_tries;
360 ovs_mutex_unlock(&cls->mutex);
361 return true;
13751fd8 362 }
f358a2cb 363
e65413ab 364 ovs_mutex_unlock(&cls->mutex);
f358a2cb 365 return false; /* No change. */
13751fd8
JR
366}
367
368static void
e48eccd1 369trie_init(struct classifier *cls, int trie_idx, const struct mf_field *field)
e65413ab 370 OVS_REQUIRES(cls->mutex)
13751fd8
JR
371{
372 struct cls_trie *trie = &cls->tries[trie_idx];
373 struct cls_subtable *subtable;
374
375 if (trie_idx < cls->n_tries) {
f358a2cb
JR
376 trie_destroy(&trie->root);
377 } else {
378 ovsrcu_set_hidden(&trie->root, NULL);
13751fd8 379 }
13751fd8
JR
380 trie->field = field;
381
f358a2cb 382 /* Add existing rules to the new trie. */
f2c21402 383 CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
13751fd8
JR
384 unsigned int plen;
385
386 plen = field ? minimask_get_prefix_len(&subtable->mask, field) : 0;
13751fd8 387 if (plen) {
627fb667 388 struct cls_match *head;
13751fd8 389
f2c21402 390 CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
627fb667 391 struct cls_match *match;
13751fd8 392
dfea28b3 393 FOR_EACH_RULE_IN_LIST_PROTECTED (match, head) {
627fb667 394 trie_insert(trie, match->cls_rule, plen);
13751fd8
JR
395 }
396 }
397 }
f358a2cb
JR
398 /* Initialize subtable's prefix length on this field. This will
399 * allow readers to use the trie. */
400 atomic_thread_fence(memory_order_release);
401 subtable->trie_plen[trie_idx] = plen;
13751fd8
JR
402 }
403}
404
5f0476ce
JR
405/* Returns true if 'cls' contains no classification rules, false otherwise.
406 * Checking the cmap requires no locking. */
064af421
BP
407bool
408classifier_is_empty(const struct classifier *cls)
409{
e48eccd1 410 return cmap_is_empty(&cls->subtables_map);
064af421
BP
411}
412
dbda2960 413/* Returns the number of rules in 'cls'. */
064af421
BP
414int
415classifier_count(const struct classifier *cls)
afae68b1 416 OVS_NO_THREAD_SAFETY_ANALYSIS
064af421 417{
afae68b1
JR
418 /* n_rules is an int, so in the presence of concurrent writers this will
419 * return either the old or a new value. */
e48eccd1 420 return cls->n_rules;
064af421
BP
421}
422
c906cedf
BP
423static uint32_t
424hash_metadata(ovs_be64 metadata_)
425{
426 uint64_t metadata = (OVS_FORCE uint64_t) metadata_;
965607c8 427 return hash_uint64(metadata);
c906cedf
BP
428}
429
430static struct cls_partition *
e48eccd1 431find_partition(const struct classifier *cls, ovs_be64 metadata, uint32_t hash)
c906cedf
BP
432{
433 struct cls_partition *partition;
434
f2c21402 435 CMAP_FOR_EACH_WITH_HASH (partition, cmap_node, hash, &cls->partitions) {
c906cedf
BP
436 if (partition->metadata == metadata) {
437 return partition;
438 }
439 }
440
441 return NULL;
442}
443
444static struct cls_partition *
e48eccd1 445create_partition(struct classifier *cls, struct cls_subtable *subtable,
c906cedf 446 ovs_be64 metadata)
e65413ab 447 OVS_REQUIRES(cls->mutex)
c906cedf
BP
448{
449 uint32_t hash = hash_metadata(metadata);
450 struct cls_partition *partition = find_partition(cls, metadata, hash);
451 if (!partition) {
452 partition = xmalloc(sizeof *partition);
453 partition->metadata = metadata;
454 partition->tags = 0;
183126a1 455 tag_tracker_init(&partition->tracker);
f2c21402 456 cmap_insert(&cls->partitions, &partition->cmap_node, hash);
c906cedf 457 }
03868246 458 tag_tracker_add(&partition->tracker, &partition->tags, subtable->tag);
c906cedf
BP
459 return partition;
460}
461
69d6040e
JR
462static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
463{
464 /* Could optimize to use the same map if needed for fast path. */
465 return MINIFLOW_GET_BE32(&match->flow, tp_src)
466 & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
467}
468
b5d97350
BP
469/* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
470 * must not modify or free it.
064af421
BP
471 *
472 * If 'cls' already contains an identical rule (including wildcards, values of
473 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
474 * rule that was replaced. The caller takes ownership of the returned rule and
48d28ac1
BP
475 * is thus responsible for destroying it with cls_rule_destroy(), freeing the
476 * memory block in which it resides, etc., as necessary.
064af421
BP
477 *
478 * Returns NULL if 'cls' does not contain a rule with an identical key, after
479 * inserting the new rule. In this case, no rules are displaced by the new
480 * rule, even rules that cannot have any effect because the new rule matches a
481 * superset of their flows and has higher priority. */
dfea28b3 482const struct cls_rule *
e48eccd1
JR
483classifier_replace(struct classifier *cls, struct cls_rule *rule)
484 OVS_EXCLUDED(cls->mutex)
064af421 485{
627fb667 486 struct cls_match *old_rule;
03868246 487 struct cls_subtable *subtable;
dfea28b3 488 const struct cls_rule *old_cls_rule = NULL;
b5d97350 489
e65413ab 490 ovs_mutex_lock(&cls->mutex);
03868246
JR
491 subtable = find_subtable(cls, &rule->match.mask);
492 if (!subtable) {
493 subtable = insert_subtable(cls, &rule->match.mask);
b5d97350
BP
494 }
495
03868246 496 old_rule = insert_rule(cls, subtable, rule);
b5d97350 497 if (!old_rule) {
e65413ab 498 old_cls_rule = NULL;
13751fd8 499
627fb667 500 rule->cls_match->partition = NULL;
c906cedf
BP
501 if (minimask_get_metadata_mask(&rule->match.mask) == OVS_BE64_MAX) {
502 ovs_be64 metadata = miniflow_get_metadata(&rule->match.flow);
627fb667
JR
503 rule->cls_match->partition = create_partition(cls, subtable,
504 metadata);
c906cedf
BP
505 }
506
064af421 507 cls->n_rules++;
13751fd8 508
e65413ab 509 for (int i = 0; i < cls->n_tries; i++) {
13751fd8
JR
510 if (subtable->trie_plen[i]) {
511 trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]);
512 }
513 }
69d6040e
JR
514
515 /* Ports trie. */
516 if (subtable->ports_mask_len) {
517 /* We mask the value to be inserted to always have the wildcarded
518 * bits in known (zero) state, so we can include them in comparison
519 * and they will always match (== their original value does not
520 * matter). */
521 ovs_be32 masked_ports = minimatch_get_ports(&rule->match);
522
523 trie_insert_prefix(&subtable->ports_trie, &masked_ports,
524 subtable->ports_mask_len);
525 }
c906cedf 526 } else {
e65413ab 527 old_cls_rule = old_rule->cls_rule;
627fb667 528 rule->cls_match->partition = old_rule->partition;
dfea28b3 529 CONST_CAST(struct cls_rule *, old_cls_rule)->cls_match = NULL;
f2c21402
JR
530
531 /* 'old_rule' contains a cmap_node, which may not be freed
532 * immediately. */
533 ovsrcu_postpone(free, old_rule);
064af421 534 }
e65413ab
JR
535 ovs_mutex_unlock(&cls->mutex);
536 return old_cls_rule;
064af421
BP
537}
538
08944c1d
BP
539/* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
540 * must not modify or free it.
541 *
542 * 'cls' must not contain an identical rule (including wildcards, values of
543 * fixed fields, and priority). Use classifier_find_rule_exactly() to find
544 * such a rule. */
545void
546classifier_insert(struct classifier *cls, struct cls_rule *rule)
547{
dfea28b3 548 const struct cls_rule *displaced_rule = classifier_replace(cls, rule);
cb22974d 549 ovs_assert(!displaced_rule);
08944c1d
BP
550}
551
48d28ac1
BP
552/* Removes 'rule' from 'cls'. It is the caller's responsibility to destroy
553 * 'rule' with cls_rule_destroy(), freeing the memory block in which 'rule'
747f140a
JR
554 * resides, etc., as necessary.
555 *
556 * Does nothing if 'rule' has been already removed, or was never inserted.
557 *
558 * Returns the removed rule, or NULL, if it was already removed.
559 */
dfea28b3
JR
560const struct cls_rule *
561classifier_remove(struct classifier *cls, const struct cls_rule *rule)
e48eccd1 562 OVS_EXCLUDED(cls->mutex)
064af421 563{
c906cedf 564 struct cls_partition *partition;
747f140a 565 struct cls_match *cls_match;
627fb667 566 struct cls_match *head;
03868246 567 struct cls_subtable *subtable;
476f36e8 568 int i;
f2c21402
JR
569 uint32_t basis = 0, hash, ihash[CLS_MAX_INDICES];
570 uint8_t prev_be32ofs = 0;
064af421 571
e65413ab 572 ovs_mutex_lock(&cls->mutex);
747f140a
JR
573 cls_match = rule->cls_match;
574 if (!cls_match) {
575 rule = NULL;
576 goto unlock; /* Already removed. */
577 }
578
03868246 579 subtable = find_subtable(cls, &rule->match.mask);
627fb667
JR
580 ovs_assert(subtable);
581
69d6040e
JR
582 if (subtable->ports_mask_len) {
583 ovs_be32 masked_ports = minimatch_get_ports(&rule->match);
584
585 trie_remove_prefix(&subtable->ports_trie,
586 &masked_ports, subtable->ports_mask_len);
587 }
13751fd8
JR
588 for (i = 0; i < cls->n_tries; i++) {
589 if (subtable->trie_plen[i]) {
590 trie_remove(&cls->tries[i], rule, subtable->trie_plen[i]);
591 }
592 }
593
476f36e8
JR
594 /* Remove rule node from indices. */
595 for (i = 0; i < subtable->n_indices; i++) {
f2c21402
JR
596 ihash[i] = minimatch_hash_range(&rule->match, prev_be32ofs,
597 subtable->index_ofs[i], &basis);
598 cmap_remove(&subtable->indices[i], &cls_match->index_nodes[i],
599 ihash[i]);
600 prev_be32ofs = subtable->index_ofs[i];
476f36e8 601 }
f2c21402 602 hash = minimatch_hash_range(&rule->match, prev_be32ofs, FLOW_U32S, &basis);
476f36e8 603
f2c21402 604 head = find_equal(subtable, &rule->match.flow, hash);
627fb667 605 if (head != cls_match) {
c501b427
JR
606 rculist_remove(&cls_match->list);
607 } else if (rculist_is_empty(&cls_match->list)) {
f2c21402 608 cmap_remove(&subtable->rules, &cls_match->cmap_node, hash);
b5d97350 609 } else {
dfea28b3 610 struct cls_match *next = next_rule_in_list_protected(cls_match);
064af421 611
c501b427 612 rculist_remove(&cls_match->list);
f2c21402
JR
613 cmap_replace(&subtable->rules, &cls_match->cmap_node,
614 &next->cmap_node, hash);
b5d97350 615 }
064af421 616
627fb667 617 partition = cls_match->partition;
183126a1
BP
618 if (partition) {
619 tag_tracker_subtract(&partition->tracker, &partition->tags,
03868246 620 subtable->tag);
183126a1 621 if (!partition->tags) {
f2c21402
JR
622 cmap_remove(&cls->partitions, &partition->cmap_node,
623 hash_metadata(partition->metadata));
624 ovsrcu_postpone(free, partition);
183126a1 625 }
c906cedf
BP
626 }
627
03868246
JR
628 if (--subtable->n_rules == 0) {
629 destroy_subtable(cls, subtable);
fe7cfa5c
JR
630 } else if (subtable->max_priority == cls_match->priority
631 && --subtable->max_count == 0) {
632 /* Find the new 'max_priority' and 'max_count'. */
633 struct cls_match *head;
eb391b76 634 int max_priority = INT_MIN;
fe7cfa5c 635
f2c21402 636 CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
fe7cfa5c
JR
637 if (head->priority > max_priority) {
638 max_priority = head->priority;
639 subtable->max_count = 1;
640 } else if (head->priority == max_priority) {
641 ++subtable->max_count;
642 }
643 }
644 subtable->max_priority = max_priority;
645 pvector_change_priority(&cls->subtables, subtable, max_priority);
4d935a6b 646 }
13751fd8 647
b5d97350 648 cls->n_rules--;
627fb667 649
f2c21402 650 ovsrcu_postpone(free, cls_match);
dfea28b3 651 CONST_CAST(struct cls_rule *, rule)->cls_match = NULL;
747f140a 652unlock:
e65413ab 653 ovs_mutex_unlock(&cls->mutex);
747f140a
JR
654
655 return rule;
064af421
BP
656}
657
13751fd8 658/* Prefix tree context. Valid when 'lookup_done' is true. Can skip all
c0bfb650
JR
659 * subtables which have a prefix match on the trie field, but whose prefix
660 * length is not indicated in 'match_plens'. For example, a subtable that
661 * has a 8-bit trie field prefix match can be skipped if
662 * !be_get_bit_at(&match_plens, 8 - 1). If skipped, 'maskbits' prefix bits
663 * must be unwildcarded to make datapath flow only match packets it should. */
13751fd8
JR
664struct trie_ctx {
665 const struct cls_trie *trie;
666 bool lookup_done; /* Status of the lookup. */
667 uint8_t be32ofs; /* U32 offset of the field in question. */
13751fd8 668 unsigned int maskbits; /* Prefix length needed to avoid false matches. */
c0bfb650
JR
669 union mf_value match_plens; /* Bitmask of prefix lengths with possible
670 * matches. */
13751fd8
JR
671};
672
673static void
674trie_ctx_init(struct trie_ctx *ctx, const struct cls_trie *trie)
675{
676 ctx->trie = trie;
677 ctx->be32ofs = trie->field->flow_be32ofs;
678 ctx->lookup_done = false;
679}
680
48c3de13
BP
681/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
682 * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules
74f74083
EJ
683 * of equal priority match 'flow', returns one arbitrarily.
684 *
685 * If a rule is found and 'wc' is non-null, bitwise-OR's 'wc' with the
686 * set of bits that were significant in the lookup. At some point
687 * earlier, 'wc' should have been initialized (e.g., by
688 * flow_wildcards_init_catchall()). */
dfea28b3 689const struct cls_rule *
e48eccd1 690classifier_lookup(const struct classifier *cls, const struct flow *flow,
74f74083 691 struct flow_wildcards *wc)
48c3de13 692{
c906cedf 693 const struct cls_partition *partition;
c906cedf 694 tag_type tags;
eb391b76 695 int best_priority = INT_MIN;
fe7cfa5c
JR
696 const struct cls_match *best;
697 struct trie_ctx trie_ctx[CLS_MAX_TRIES];
698 struct cls_subtable *subtable;
c906cedf 699
f358a2cb
JR
700 /* Synchronize for cls->n_tries and subtable->trie_plen. They can change
701 * when table configuration changes, which happens typically only on
702 * startup. */
703 atomic_thread_fence(memory_order_acquire);
704
03868246
JR
705 /* Determine 'tags' such that, if 'subtable->tag' doesn't intersect them,
706 * then 'flow' cannot possibly match in 'subtable':
c906cedf
BP
707 *
708 * - If flow->metadata maps to a given 'partition', then we can use
709 * 'tags' for 'partition->tags'.
710 *
711 * - If flow->metadata has no partition, then no rule in 'cls' has an
712 * exact-match for flow->metadata. That means that we don't need to
03868246 713 * search any subtable that includes flow->metadata in its mask.
c906cedf 714 *
03868246 715 * In either case, we always need to search any cls_subtables that do not
c906cedf 716 * include flow->metadata in its mask. One way to do that would be to
03868246
JR
717 * check the "cls_subtable"s explicitly for that, but that would require an
718 * extra branch per subtable. Instead, we mark such a cls_subtable's
719 * 'tags' as TAG_ALL and make sure that 'tags' is never empty. This means
720 * that 'tags' always intersects such a cls_subtable's 'tags', so we don't
721 * need a special case.
c906cedf 722 */
f2c21402 723 partition = (cmap_is_empty(&cls->partitions)
c906cedf
BP
724 ? NULL
725 : find_partition(cls, flow->metadata,
726 hash_metadata(flow->metadata)));
727 tags = partition ? partition->tags : TAG_ARBITRARY;
48c3de13 728
ff8241db 729 /* Initialize trie contexts for find_match_wc(). */
fe7cfa5c 730 for (int i = 0; i < cls->n_tries; i++) {
13751fd8
JR
731 trie_ctx_init(&trie_ctx[i], &cls->tries[i]);
732 }
ec988646 733
b5d97350 734 best = NULL;
fe7cfa5c
JR
735 PVECTOR_FOR_EACH_PRIORITY(subtable, best_priority, 2,
736 sizeof(struct cls_subtable), &cls->subtables) {
dfea28b3 737 const struct cls_match *rule;
c906cedf 738
fe7cfa5c 739 if (!tag_intersects(tags, subtable->tag)) {
c906cedf
BP
740 continue;
741 }
74f74083 742
fe7cfa5c 743 rule = find_match_wc(subtable, flow, trie_ctx, cls->n_tries, wc);
eb391b76
BP
744 if (rule && rule->priority > best_priority) {
745 best_priority = rule->priority;
1f3c5efc 746 best = rule;
b5d97350 747 }
48c3de13 748 }
13751fd8 749
627fb667 750 return best ? best->cls_rule : NULL;
48c3de13
BP
751}
752
b5d97350
BP
753/* Finds and returns a rule in 'cls' with exactly the same priority and
754 * matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
c084ce1d 755 * contain an exact match. */
dfea28b3 756const struct cls_rule *
e48eccd1 757classifier_find_rule_exactly(const struct classifier *cls,
76ecc721 758 const struct cls_rule *target)
064af421 759{
dfea28b3
JR
760 const struct cls_match *head, *rule;
761 const struct cls_subtable *subtable;
064af421 762
03868246 763 subtable = find_subtable(cls, &target->match.mask);
0722ee5c 764 if (!subtable) {
98abae4a 765 return NULL;
4d935a6b
JR
766 }
767
03868246 768 head = find_equal(subtable, &target->match.flow,
5cb7a798
BP
769 miniflow_hash_in_minimask(&target->match.flow,
770 &target->match.mask, 0));
98abae4a
JR
771 if (!head) {
772 return NULL;
773 }
b5d97350
BP
774 FOR_EACH_RULE_IN_LIST (rule, head) {
775 if (target->priority >= rule->priority) {
627fb667 776 return target->priority == rule->priority ? rule->cls_rule : NULL;
064af421
BP
777 }
778 }
779 return NULL;
780}
781
81a76618
BP
782/* Finds and returns a rule in 'cls' with priority 'priority' and exactly the
783 * same matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
784 * contain an exact match. */
dfea28b3 785const struct cls_rule *
81a76618 786classifier_find_match_exactly(const struct classifier *cls,
eb391b76 787 const struct match *target, int priority)
81a76618 788{
dfea28b3 789 const struct cls_rule *retval;
81a76618
BP
790 struct cls_rule cr;
791
792 cls_rule_init(&cr, target, priority);
793 retval = classifier_find_rule_exactly(cls, &cr);
48d28ac1 794 cls_rule_destroy(&cr);
81a76618
BP
795
796 return retval;
797}
798
faa50f40
BP
799/* Checks if 'target' would overlap any other rule in 'cls'. Two rules are
800 * considered to overlap if both rules have the same priority and a packet
801 * could match both. */
49bdc010 802bool
e48eccd1 803classifier_rule_overlaps(const struct classifier *cls,
faa50f40 804 const struct cls_rule *target)
e48eccd1 805 OVS_EXCLUDED(cls->mutex)
49bdc010 806{
03868246 807 struct cls_subtable *subtable;
49bdc010 808
e65413ab 809 ovs_mutex_lock(&cls->mutex);
03868246 810 /* Iterate subtables in the descending max priority order. */
eb391b76 811 PVECTOR_FOR_EACH_PRIORITY (subtable, target->priority - 1, 2,
fe7cfa5c 812 sizeof(struct cls_subtable), &cls->subtables) {
5cb7a798
BP
813 uint32_t storage[FLOW_U32S];
814 struct minimask mask;
627fb667 815 struct cls_match *head;
49bdc010 816
03868246 817 minimask_combine(&mask, &target->match.mask, &subtable->mask, storage);
f2c21402 818 CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
627fb667 819 struct cls_match *rule;
49bdc010 820
dfea28b3 821 FOR_EACH_RULE_IN_LIST_PROTECTED (rule, head) {
4d935a6b
JR
822 if (rule->priority < target->priority) {
823 break; /* Rules in descending priority order. */
824 }
faa50f40 825 if (rule->priority == target->priority
5cb7a798 826 && miniflow_equal_in_minimask(&target->match.flow,
3016f3e4 827 &rule->flow, &mask)) {
e65413ab 828 ovs_mutex_unlock(&cls->mutex);
49bdc010
JP
829 return true;
830 }
831 }
832 }
833 }
834
e65413ab 835 ovs_mutex_unlock(&cls->mutex);
49bdc010
JP
836 return false;
837}
6ceeaa92
BP
838
839/* Returns true if 'rule' exactly matches 'criteria' or if 'rule' is more
840 * specific than 'criteria'. That is, 'rule' matches 'criteria' and this
841 * function returns true if, for every field:
842 *
843 * - 'criteria' and 'rule' specify the same (non-wildcarded) value for the
844 * field, or
845 *
846 * - 'criteria' wildcards the field,
847 *
848 * Conversely, 'rule' does not match 'criteria' and this function returns false
849 * if, for at least one field:
850 *
851 * - 'criteria' and 'rule' specify different values for the field, or
852 *
853 * - 'criteria' specifies a value for the field but 'rule' wildcards it.
854 *
855 * Equivalently, the truth table for whether a field matches is:
856 *
857 * rule
858 *
859 * c wildcard exact
860 * r +---------+---------+
861 * i wild | yes | yes |
862 * t card | | |
863 * e +---------+---------+
864 * r exact | no |if values|
865 * i | |are equal|
866 * a +---------+---------+
867 *
868 * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
869 * commands and by OpenFlow 1.0 aggregate and flow stats.
870 *
81a76618 871 * Ignores rule->priority. */
6ceeaa92
BP
872bool
873cls_rule_is_loose_match(const struct cls_rule *rule,
5cb7a798 874 const struct minimatch *criteria)
6ceeaa92 875{
5cb7a798
BP
876 return (!minimask_has_extra(&rule->match.mask, &criteria->mask)
877 && miniflow_equal_in_minimask(&rule->match.flow, &criteria->flow,
878 &criteria->mask));
6ceeaa92 879}
b5d97350 880\f
5ecc9d81
BP
881/* Iteration. */
882
883static bool
627fb667 884rule_matches(const struct cls_match *rule, const struct cls_rule *target)
5ecc9d81
BP
885{
886 return (!target
3016f3e4 887 || miniflow_equal_in_minimask(&rule->flow,
5cb7a798
BP
888 &target->match.flow,
889 &target->match.mask));
5ecc9d81
BP
890}
891
dfea28b3 892static const struct cls_match *
03868246 893search_subtable(const struct cls_subtable *subtable,
f2c21402 894 struct cls_cursor *cursor)
5ecc9d81 895{
f2c21402
JR
896 if (!cursor->target
897 || !minimask_has_extra(&subtable->mask, &cursor->target->match.mask)) {
dfea28b3 898 const struct cls_match *rule;
5ecc9d81 899
f2c21402
JR
900 CMAP_CURSOR_FOR_EACH (rule, cmap_node, &cursor->rules,
901 &subtable->rules) {
902 if (rule_matches(rule, cursor->target)) {
5ecc9d81
BP
903 return rule;
904 }
905 }
906 }
907 return NULL;
908}
909
5f0476ce
JR
910/* Initializes 'cursor' for iterating through rules in 'cls', and returns the
911 * first matching cls_rule via '*pnode', or NULL if there are no matches.
5ecc9d81 912 *
6ceeaa92 913 * - If 'target' is null, the cursor will visit every rule in 'cls'.
5ecc9d81 914 *
6ceeaa92
BP
915 * - If 'target' is nonnull, the cursor will visit each 'rule' in 'cls'
916 * such that cls_rule_is_loose_match(rule, target) returns true.
5ecc9d81 917 *
6ceeaa92 918 * Ignores target->priority. */
78c8df12
BP
919struct cls_cursor cls_cursor_start(const struct classifier *cls,
920 const struct cls_rule *target,
921 bool safe)
5f0476ce 922 OVS_NO_THREAD_SAFETY_ANALYSIS
5ecc9d81 923{
5f0476ce 924 struct cls_cursor cursor;
03868246 925 struct cls_subtable *subtable;
5ecc9d81 926
5f0476ce 927 cursor.safe = safe;
e48eccd1 928 cursor.cls = cls;
5f0476ce 929 cursor.target = target && !cls_rule_is_catchall(target) ? target : NULL;
78c8df12 930 cursor.rule = NULL;
5f0476ce
JR
931
932 /* Find first rule. */
e65413ab 933 ovs_mutex_lock(&cursor.cls->mutex);
5f0476ce 934 CMAP_CURSOR_FOR_EACH (subtable, cmap_node, &cursor.subtables,
e65413ab 935 &cursor.cls->subtables_map) {
dfea28b3 936 const struct cls_match *rule = search_subtable(subtable, &cursor);
f2c21402 937
5ecc9d81 938 if (rule) {
5f0476ce 939 cursor.subtable = subtable;
78c8df12 940 cursor.rule = rule->cls_rule;
5f0476ce 941 break;
5ecc9d81
BP
942 }
943 }
944
5f0476ce 945 /* Leave locked if requested and have a rule. */
78c8df12 946 if (safe || !cursor.rule) {
e65413ab 947 ovs_mutex_unlock(&cursor.cls->mutex);
5f0476ce
JR
948 }
949 return cursor;
950}
951
dfea28b3 952static const struct cls_rule *
1caa1561 953cls_cursor_next(struct cls_cursor *cursor)
5f0476ce 954 OVS_NO_THREAD_SAFETY_ANALYSIS
5ecc9d81 955{
dfea28b3 956 const struct cls_match *rule = cursor->rule->cls_match;
03868246 957 const struct cls_subtable *subtable;
dfea28b3 958 const struct cls_match *next;
5ecc9d81 959
955f579d
BP
960 next = next_rule_in_list__(rule);
961 if (next->priority < rule->priority) {
1caa1561 962 return next->cls_rule;
5ecc9d81
BP
963 }
964
955f579d 965 /* 'next' is the head of the list, that is, the rule that is included in
f2c21402 966 * the subtable's map. (This is important when the classifier contains
03868246 967 * rules that differ only in priority.) */
955f579d 968 rule = next;
f2c21402 969 CMAP_CURSOR_FOR_EACH_CONTINUE (rule, cmap_node, &cursor->rules) {
5ecc9d81 970 if (rule_matches(rule, cursor->target)) {
1caa1561 971 return rule->cls_rule;
5ecc9d81
BP
972 }
973 }
974
03868246 975 subtable = cursor->subtable;
f2c21402
JR
976 CMAP_CURSOR_FOR_EACH_CONTINUE (subtable, cmap_node, &cursor->subtables) {
977 rule = search_subtable(subtable, cursor);
5ecc9d81 978 if (rule) {
03868246 979 cursor->subtable = subtable;
1caa1561 980 return rule->cls_rule;
5ecc9d81
BP
981 }
982 }
983
1caa1561
BP
984 return NULL;
985}
986
987/* Sets 'cursor->rule' to the next matching cls_rule in 'cursor''s iteration,
988 * or to null if all matching rules have been visited. */
989void
990cls_cursor_advance(struct cls_cursor *cursor)
991 OVS_NO_THREAD_SAFETY_ANALYSIS
992{
993 if (cursor->safe) {
994 ovs_mutex_lock(&cursor->cls->mutex);
995 }
996 cursor->rule = cls_cursor_next(cursor);
997 if (cursor->safe || !cursor->rule) {
998 ovs_mutex_unlock(&cursor->cls->mutex);
999 }
5ecc9d81
BP
1000}
1001\f
03868246 1002static struct cls_subtable *
e48eccd1 1003find_subtable(const struct classifier *cls, const struct minimask *mask)
b5d97350 1004{
03868246 1005 struct cls_subtable *subtable;
064af421 1006
f2c21402 1007 CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, minimask_hash(mask, 0),
5a87054c 1008 &cls->subtables_map) {
03868246
JR
1009 if (minimask_equal(mask, &subtable->mask)) {
1010 return subtable;
064af421
BP
1011 }
1012 }
b5d97350 1013 return NULL;
064af421 1014}
064af421 1015
e65413ab 1016/* The new subtable will be visible to the readers only after this. */
03868246 1017static struct cls_subtable *
e48eccd1 1018insert_subtable(struct classifier *cls, const struct minimask *mask)
e65413ab 1019 OVS_REQUIRES(cls->mutex)
b5d97350 1020{
c906cedf 1021 uint32_t hash = minimask_hash(mask, 0);
03868246 1022 struct cls_subtable *subtable;
476f36e8
JR
1023 int i, index = 0;
1024 struct flow_wildcards old, new;
1025 uint8_t prev;
3016f3e4 1026 int count = count_1bits(mask->masks.map);
064af421 1027
3016f3e4
JR
1028 subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
1029 + MINIFLOW_VALUES_SIZE(count));
f2c21402 1030 cmap_init(&subtable->rules);
f80028fe
JR
1031 miniflow_clone_inline(CONST_CAST(struct miniflow *, &subtable->mask.masks),
1032 &mask->masks, count);
476f36e8
JR
1033
1034 /* Init indices for segmented lookup, if any. */
1035 flow_wildcards_init_catchall(&new);
1036 old = new;
1037 prev = 0;
1038 for (i = 0; i < cls->n_flow_segments; i++) {
1039 flow_wildcards_fold_minimask_range(&new, mask, prev,
1040 cls->flow_segments[i]);
1041 /* Add an index if it adds mask bits. */
1042 if (!flow_wildcards_equal(&new, &old)) {
f2c21402 1043 cmap_init(&subtable->indices[index]);
f80028fe
JR
1044 *CONST_CAST(uint8_t *, &subtable->index_ofs[index])
1045 = cls->flow_segments[i];
476f36e8
JR
1046 index++;
1047 old = new;
1048 }
1049 prev = cls->flow_segments[i];
1050 }
1051 /* Check if the rest of the subtable's mask adds any bits,
1052 * and remove the last index if it doesn't. */
1053 if (index > 0) {
1054 flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U32S);
1055 if (flow_wildcards_equal(&new, &old)) {
1056 --index;
f80028fe 1057 *CONST_CAST(uint8_t *, &subtable->index_ofs[index]) = 0;
f2c21402 1058 cmap_destroy(&subtable->indices[index]);
476f36e8
JR
1059 }
1060 }
f80028fe 1061 *CONST_CAST(uint8_t *, &subtable->n_indices) = index;
476f36e8 1062
f80028fe
JR
1063 *CONST_CAST(tag_type *, &subtable->tag) =
1064 (minimask_get_metadata_mask(mask) == OVS_BE64_MAX
1065 ? tag_create_deterministic(hash)
1066 : TAG_ALL);
064af421 1067
13751fd8
JR
1068 for (i = 0; i < cls->n_tries; i++) {
1069 subtable->trie_plen[i] = minimask_get_prefix_len(mask,
1070 cls->tries[i].field);
1071 }
1072
69d6040e 1073 /* Ports trie. */
f358a2cb 1074 ovsrcu_set_hidden(&subtable->ports_trie, NULL);
f80028fe 1075 *CONST_CAST(int *, &subtable->ports_mask_len)
69d6040e
JR
1076 = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));
1077
f2c21402 1078 cmap_insert(&cls->subtables_map, &subtable->cmap_node, hash);
ec988646 1079
03868246 1080 return subtable;
064af421
BP
1081}
1082
01c0f83a 1083/* RCU readers may still access the subtable before it is actually freed. */
b5d97350 1084static void
e48eccd1 1085destroy_subtable(struct classifier *cls, struct cls_subtable *subtable)
e65413ab 1086 OVS_REQUIRES(cls->mutex)
b5d97350 1087{
476f36e8
JR
1088 int i;
1089
fe7cfa5c 1090 pvector_remove(&cls->subtables, subtable);
01c0f83a
JR
1091 cmap_remove(&cls->subtables_map, &subtable->cmap_node,
1092 minimask_hash(&subtable->mask, 0));
1093
1094 ovs_assert(ovsrcu_get_protected(struct trie_node *, &subtable->ports_trie)
1095 == NULL);
1096 ovs_assert(cmap_is_empty(&subtable->rules));
69d6040e 1097
476f36e8 1098 for (i = 0; i < subtable->n_indices; i++) {
f2c21402 1099 cmap_destroy(&subtable->indices[i]);
476f36e8 1100 }
f2c21402 1101 cmap_destroy(&subtable->rules);
fe7cfa5c 1102 ovsrcu_postpone(free, subtable);
4aacd02d
BP
1103}
1104
13751fd8
JR
1105struct range {
1106 uint8_t start;
1107 uint8_t end;
1108};
1109
c0bfb650
JR
1110static unsigned int be_get_bit_at(const ovs_be32 value[], unsigned int ofs);
1111
13751fd8
JR
1112/* Return 'true' if can skip rest of the subtable based on the prefix trie
1113 * lookup results. */
1114static inline bool
1115check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
1116 const unsigned int field_plen[CLS_MAX_TRIES],
1117 const struct range ofs, const struct flow *flow,
1118 struct flow_wildcards *wc)
1119{
1120 int j;
1121
1122 /* Check if we could avoid fully unwildcarding the next level of
1123 * fields using the prefix tries. The trie checks are done only as
1124 * needed to avoid folding in additional bits to the wildcards mask. */
1125 for (j = 0; j < n_tries; j++) {
1126 /* Is the trie field relevant for this subtable? */
1127 if (field_plen[j]) {
1128 struct trie_ctx *ctx = &trie_ctx[j];
1129 uint8_t be32ofs = ctx->be32ofs;
1130
1131 /* Is the trie field within the current range of fields? */
1132 if (be32ofs >= ofs.start && be32ofs < ofs.end) {
1133 /* On-demand trie lookup. */
1134 if (!ctx->lookup_done) {
c0bfb650
JR
1135 memset(&ctx->match_plens, 0, sizeof ctx->match_plens);
1136 ctx->maskbits = trie_lookup(ctx->trie, flow,
1137 &ctx->match_plens);
13751fd8
JR
1138 ctx->lookup_done = true;
1139 }
1140 /* Possible to skip the rest of the subtable if subtable's
c0bfb650
JR
1141 * prefix on the field is not included in the lookup result. */
1142 if (!be_get_bit_at(&ctx->match_plens.be32, field_plen[j] - 1)) {
1817dcea
JR
1143 /* We want the trie lookup to never result in unwildcarding
1144 * any bits that would not be unwildcarded otherwise.
1145 * Since the trie is shared by the whole classifier, it is
1146 * possible that the 'maskbits' contain bits that are
1147 * irrelevant for the partition relevant for the current
1148 * packet. Hence the checks below. */
13751fd8 1149
13751fd8 1150 /* Check that the trie result will not unwildcard more bits
1817dcea 1151 * than this subtable would otherwise. */
13751fd8
JR
1152 if (ctx->maskbits <= field_plen[j]) {
1153 /* Unwildcard the bits and skip the rest. */
1154 mask_set_prefix_bits(wc, be32ofs, ctx->maskbits);
1155 /* Note: Prerequisite already unwildcarded, as the only
1156 * prerequisite of the supported trie lookup fields is
1817dcea
JR
1157 * the ethertype, which is always unwildcarded. */
1158 return true;
1159 }
1160 /* Can skip if the field is already unwildcarded. */
1161 if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) {
13751fd8
JR
1162 return true;
1163 }
1164 }
1165 }
1166 }
1167 }
1168 return false;
1169}
1170
3016f3e4
JR
1171/* Returns true if 'target' satisifies 'flow'/'mask', that is, if each bit
1172 * for which 'flow', for which 'mask' has a bit set, specifies a particular
1173 * value has the correct value in 'target'.
1174 *
1175 * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
a64759f0
JR
1176 * target, mask) but this is faster because of the invariant that
1177 * flow->map and mask->masks.map are the same, and that this version
1178 * takes the 'wc'. */
3016f3e4
JR
1179static inline bool
1180miniflow_and_mask_matches_flow(const struct miniflow *flow,
1181 const struct minimask *mask,
e9319757 1182 const struct flow *target)
3016f3e4
JR
1183{
1184 const uint32_t *flowp = miniflow_get_u32_values(flow);
1185 const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
a64759f0 1186 uint32_t idx;
3016f3e4 1187
a64759f0
JR
1188 MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
1189 uint32_t diff = (*flowp++ ^ flow_u32_value(target, idx)) & *maskp++;
1190
1191 if (diff) {
3016f3e4
JR
1192 return false;
1193 }
1194 }
1195
1196 return true;
1197}
1198
dfea28b3 1199static inline const struct cls_match *
476f36e8
JR
1200find_match(const struct cls_subtable *subtable, const struct flow *flow,
1201 uint32_t hash)
b5d97350 1202{
dfea28b3 1203 const struct cls_match *rule;
b5d97350 1204
f2c21402 1205 CMAP_FOR_EACH_WITH_HASH (rule, cmap_node, hash, &subtable->rules) {
3016f3e4 1206 if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
e9319757 1207 flow)) {
b5d97350 1208 return rule;
064af421
BP
1209 }
1210 }
c23740be 1211
064af421
BP
1212 return NULL;
1213}
1214
e9319757
JR
1215/* Returns true if 'target' satisifies 'flow'/'mask', that is, if each bit
1216 * for which 'flow', for which 'mask' has a bit set, specifies a particular
1217 * value has the correct value in 'target'.
1218 *
1219 * This function is equivalent to miniflow_and_mask_matches_flow() but this
1220 * version fills in the mask bits in 'wc'. */
1221static inline bool
1222miniflow_and_mask_matches_flow_wc(const struct miniflow *flow,
1223 const struct minimask *mask,
1224 const struct flow *target,
1225 struct flow_wildcards *wc)
1226{
1227 const uint32_t *flowp = miniflow_get_u32_values(flow);
1228 const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
1229 uint32_t idx;
1230
1231 MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
1232 uint32_t mask = *maskp++;
1233 uint32_t diff = (*flowp++ ^ flow_u32_value(target, idx)) & mask;
1234
1235 if (diff) {
1236 /* Only unwildcard if none of the differing bits is already
1237 * exact-matched. */
1238 if (!(flow_u32_value(&wc->masks, idx) & diff)) {
1239 /* Keep one bit of the difference. */
1240 *flow_u32_lvalue(&wc->masks, idx) |= rightmost_1bit(diff);
1241 }
1242 return false;
1243 }
1244 /* Fill in the bits that were looked at. */
1245 *flow_u32_lvalue(&wc->masks, idx) |= mask;
1246 }
1247
1248 return true;
1249}
1250
386cb9f7
JR
1251/* Unwildcard the fields looked up so far, if any. */
1252static void
1253fill_range_wc(const struct cls_subtable *subtable, struct flow_wildcards *wc,
1254 uint8_t to)
1255{
1256 if (to) {
1257 flow_wildcards_fold_minimask_range(wc, &subtable->mask, 0, to);
1258 }
1259}
1260
dfea28b3 1261static const struct cls_match *
476f36e8 1262find_match_wc(const struct cls_subtable *subtable, const struct flow *flow,
13751fd8
JR
1263 struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
1264 struct flow_wildcards *wc)
476f36e8
JR
1265{
1266 uint32_t basis = 0, hash;
dfea28b3 1267 const struct cls_match *rule = NULL;
476f36e8 1268 int i;
13751fd8 1269 struct range ofs;
476f36e8 1270
ec988646 1271 if (OVS_UNLIKELY(!wc)) {
476f36e8
JR
1272 return find_match(subtable, flow,
1273 flow_hash_in_minimask(flow, &subtable->mask, 0));
1274 }
1275
13751fd8 1276 ofs.start = 0;
476f36e8
JR
1277 /* Try to finish early by checking fields in segments. */
1278 for (i = 0; i < subtable->n_indices; i++) {
55847abe 1279 const struct cmap_node *inode;
f2c21402 1280
13751fd8 1281 ofs.end = subtable->index_ofs[i];
476f36e8 1282
13751fd8
JR
1283 if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow,
1284 wc)) {
386cb9f7
JR
1285 /* 'wc' bits for the trie field set, now unwildcard the preceding
1286 * bits used so far. */
1287 fill_range_wc(subtable, wc, ofs.start);
1288 return NULL;
13751fd8
JR
1289 }
1290 hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
1291 ofs.end, &basis);
f2c21402 1292 inode = cmap_find(&subtable->indices[i], hash);
476f36e8 1293 if (!inode) {
386cb9f7
JR
1294 /* No match, can stop immediately, but must fold in the bits
1295 * used in lookup so far. */
1296 fill_range_wc(subtable, wc, ofs.end);
1297 return NULL;
476f36e8
JR
1298 }
1299
1300 /* If we have narrowed down to a single rule already, check whether
a64759f0 1301 * that rule matches. Either way, we're done.
476f36e8
JR
1302 *
1303 * (Rare) hash collisions may cause us to miss the opportunity for this
1304 * optimization. */
f2c21402 1305 if (!cmap_node_next(inode)) {
476f36e8 1306 ASSIGN_CONTAINER(rule, inode - i, index_nodes);
e9319757
JR
1307 if (miniflow_and_mask_matches_flow_wc(&rule->flow, &subtable->mask,
1308 flow, wc)) {
1309 return rule;
476f36e8 1310 }
e9319757 1311 return NULL;
476f36e8 1312 }
386cb9f7 1313 ofs.start = ofs.end;
476f36e8 1314 }
13751fd8
JR
1315 ofs.end = FLOW_U32S;
1316 /* Trie check for the final range. */
1317 if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow, wc)) {
386cb9f7
JR
1318 fill_range_wc(subtable, wc, ofs.start);
1319 return NULL;
13751fd8 1320 }
a64759f0
JR
1321 hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
1322 ofs.end, &basis);
1323 rule = find_match(subtable, flow, hash);
69d6040e
JR
1324 if (!rule && subtable->ports_mask_len) {
1325 /* Ports are always part of the final range, if any.
1326 * No match was found for the ports. Use the ports trie to figure out
1327 * which ports bits to unwildcard. */
1328 unsigned int mbits;
c0bfb650 1329 ovs_be32 value, plens, mask;
69d6040e
JR
1330
1331 mask = MINIFLOW_GET_BE32(&subtable->mask.masks, tp_src);
1332 value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
c0bfb650 1333 mbits = trie_lookup_value(&subtable->ports_trie, &value, &plens, 32);
69d6040e
JR
1334
1335 ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
86f35fb5 1336 mask & be32_prefix_mask(mbits);
69d6040e 1337
386cb9f7
JR
1338 /* Unwildcard all bits in the mask upto the ports, as they were used
1339 * to determine there is no match. */
1340 fill_range_wc(subtable, wc, TP_PORTS_OFS32);
1341 return NULL;
69d6040e 1342 }
e9319757 1343
13751fd8 1344 /* Must unwildcard all the fields, as they were looked at. */
476f36e8
JR
1345 flow_wildcards_fold_minimask(wc, &subtable->mask);
1346 return rule;
1347}
1348
627fb667 1349static struct cls_match *
dfea28b3 1350find_equal(const struct cls_subtable *subtable, const struct miniflow *flow,
03868246 1351 uint32_t hash)
064af421 1352{
627fb667 1353 struct cls_match *head;
064af421 1354
f2c21402 1355 CMAP_FOR_EACH_WITH_HASH (head, cmap_node, hash, &subtable->rules) {
3016f3e4 1356 if (miniflow_equal(&head->flow, flow)) {
b5d97350 1357 return head;
064af421
BP
1358 }
1359 }
1360 return NULL;
1361}
1362
afae68b1
JR
1363/*
1364 * As the readers are operating concurrently with the modifications, a
1365 * concurrent reader may or may not see the new rule, depending on how
1366 * the concurrent events overlap with each other. This is no
1367 * different from the former locked behavior, but there the visibility
1368 * of the new rule only depended on the timing of the locking
1369 * functions.
1370 *
1371 * The new rule is first added to the segment indices, so the readers
1372 * may find the rule in the indices before the rule is visible in the
1373 * subtables 'rules' map. This may result in us losing the
1374 * opportunity to quit lookups earlier, resulting in sub-optimal
1375 * wildcarding. This will be fixed by forthcoming revalidation always
1376 * scheduled after flow table changes.
1377 *
1378 * Similar behavior may happen due to us removing the overlapping rule
1379 * (if any) from the indices only after the new rule has been added.
1380 *
1381 * The subtable's max priority is updated only after the rule is
1382 * inserted, so the concurrent readers may not see the rule, as the
1383 * updated priority ordered subtable list will only be visible after
1384 * the subtable's max priority is updated.
1385 *
1386 * Similarly, the classifier's partitions for new rules are updated by
1387 * the caller after this function, so the readers may keep skipping
1388 * the subtable until they see the updated partitions.
1389 */
627fb667 1390static struct cls_match *
e48eccd1 1391insert_rule(struct classifier *cls, struct cls_subtable *subtable,
f2c21402 1392 struct cls_rule *new_rule)
e65413ab 1393 OVS_REQUIRES(cls->mutex)
064af421 1394{
627fb667 1395 struct cls_match *old = NULL;
f2c21402
JR
1396 struct cls_match *new = cls_match_alloc(new_rule);
1397 struct cls_match *head;
476f36e8 1398 int i;
f2c21402 1399 uint32_t basis = 0, hash, ihash[CLS_MAX_INDICES];
13751fd8 1400 uint8_t prev_be32ofs = 0;
476f36e8
JR
1401
1402 /* Add new node to segment indices. */
1403 for (i = 0; i < subtable->n_indices; i++) {
f2c21402
JR
1404 ihash[i] = minimatch_hash_range(&new_rule->match, prev_be32ofs,
1405 subtable->index_ofs[i], &basis);
1406 cmap_insert(&subtable->indices[i], &new->index_nodes[i], ihash[i]);
13751fd8 1407 prev_be32ofs = subtable->index_ofs[i];
476f36e8 1408 }
f2c21402
JR
1409 hash = minimatch_hash_range(&new_rule->match, prev_be32ofs, FLOW_U32S,
1410 &basis);
1411 head = find_equal(subtable, &new_rule->match.flow, hash);
b5d97350 1412 if (!head) {
f2c21402 1413 cmap_insert(&subtable->rules, &new->cmap_node, hash);
c501b427 1414 rculist_init(&new->list);
4aacd02d 1415 goto out;
b5d97350
BP
1416 } else {
1417 /* Scan the list for the insertion point that will keep the list in
1418 * order of decreasing priority. */
627fb667 1419 struct cls_match *rule;
476f36e8 1420
dfea28b3 1421 FOR_EACH_RULE_IN_LIST_PROTECTED (rule, head) {
f2c21402 1422 if (new->priority >= rule->priority) {
b5d97350 1423 if (rule == head) {
f2c21402
JR
1424 /* 'new' is the new highest-priority flow in the list. */
1425 cmap_replace(&subtable->rules, &rule->cmap_node,
1426 &new->cmap_node, hash);
b5d97350 1427 }
064af421 1428
f2c21402 1429 if (new->priority == rule->priority) {
c501b427 1430 rculist_replace(&new->list, &rule->list);
4aacd02d 1431 old = rule;
b5d97350 1432 } else {
c501b427 1433 rculist_insert(&rule->list, &new->list);
b5d97350 1434 }
fe7cfa5c 1435 goto out;
b5d97350
BP
1436 }
1437 }
064af421 1438
b5d97350 1439 /* Insert 'new' at the end of the list. */
c501b427 1440 rculist_push_back(&head->list, &new->list);
064af421 1441 }
4aacd02d
BP
1442
1443 out:
1444 if (!old) {
fe7cfa5c
JR
1445 subtable->n_rules++;
1446
1447 /* Rule was added, not replaced. Update 'subtable's 'max_priority'
1448 * and 'max_count', if necessary. */
1449 if (subtable->n_rules == 1) {
f2c21402 1450 subtable->max_priority = new->priority;
fe7cfa5c 1451 subtable->max_count = 1;
f2c21402
JR
1452 pvector_insert(&cls->subtables, subtable, new->priority);
1453 } else if (subtable->max_priority == new->priority) {
fe7cfa5c 1454 ++subtable->max_count;
f2c21402
JR
1455 } else if (new->priority > subtable->max_priority) {
1456 subtable->max_priority = new->priority;
fe7cfa5c 1457 subtable->max_count = 1;
f2c21402 1458 pvector_change_priority(&cls->subtables, subtable, new->priority);
fe7cfa5c 1459 }
476f36e8
JR
1460 } else {
1461 /* Remove old node from indices. */
1462 for (i = 0; i < subtable->n_indices; i++) {
f2c21402 1463 cmap_remove(&subtable->indices[i], &old->index_nodes[i], ihash[i]);
476f36e8 1464 }
4aacd02d
BP
1465 }
1466 return old;
064af421 1467}
\f
/* A longest-prefix match tree. */

/* Return at least 'plen' bits of the 'prefix', starting at bit offset 'ofs'.
 * Prefixes are in network byte order, and offset 0 corresponds to the most
 * significant bit of the first byte.  The offset can be read as "how many
 * bits to skip from the start of the prefix starting at 'pr'". */
static uint32_t
raw_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    uint32_t prefix;

    pr += ofs / 32; /* Where to start. */
    ofs %= 32;      /* How many bits to skip at 'pr'. */

    prefix = ntohl(*pr) << ofs; /* Get the first 32 - ofs bits. */
    if (plen > 32 - ofs) {      /* Need more than we have already? */
        prefix |= ntohl(*++pr) >> (32 - ofs);
    }
    /* Return with possible unwanted bits at the end. */
    return prefix;
}
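
/* Example: with pr[] = { htonl(0x01234567), htonl(0x89abcdef) }, ofs = 16
 * and plen = 32, raw_get_prefix() returns 0x456789ab: the low 16 bits of
 * the first word shifted to the top, topped up with the high 16 bits of
 * the second word. */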

/* Return min(TRIE_PREFIX_BITS, plen) bits of the 'prefix', starting at bit
 * offset 'ofs'.  Prefixes are in network byte order, and offset 0
 * corresponds to the most significant bit of the first byte.  The offset
 * can be read as "how many bits to skip from the start of the prefix
 * starting at 'pr'". */
static uint32_t
trie_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    if (!plen) {
        return 0;
    }
    if (plen > TRIE_PREFIX_BITS) {
        plen = TRIE_PREFIX_BITS; /* Get at most TRIE_PREFIX_BITS. */
    }
    /* Return with unwanted bits cleared. */
    return raw_get_prefix(pr, ofs, plen) & ~0u << (32 - plen);
}
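
/* Example: for the IPv4 prefix 10.0.0.0/8 (pr[0] == htonl(0x0a000000),
 * ofs == 0, plen == 8), trie_get_prefix() returns 0x0a000000: the top
 * 8 bits are kept and the remaining bits are cleared. */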

/* Return the number of bits, at most 'n_bits', in which the MSBs of
 * 'prefix' equal 'value' starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
prefix_equal_bits(uint32_t prefix, unsigned int n_bits, const ovs_be32 value[],
                  unsigned int ofs)
{
    uint64_t diff = prefix ^ raw_get_prefix(value, ofs, n_bits);
    /* Set the bit after the relevant bits to limit the result. */
    return raw_clz64(diff << 32 | UINT64_C(1) << (63 - n_bits));
}
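
/* Example: prefix_equal_bits(0x0a000000, 16, value, 0), with 'value'
 * holding 10.1.0.0, returns 15: the first 15 bits agree and bit 15
 * differs.  With n_bits == 8 the same call returns 8, as the sentinel
 * bit caps the result at the number of compared bits. */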

/* Return the number of equal bits in 'node' prefix and a 'prefix' of length
 * 'plen', starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
trie_prefix_equal_bits(const struct trie_node *node, const ovs_be32 prefix[],
                       unsigned int ofs, unsigned int plen)
{
    return prefix_equal_bits(node->prefix, MIN(node->n_bits, plen - ofs),
                             prefix, ofs);
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' can
 * be greater than 31. */
static unsigned int
be_get_bit_at(const ovs_be32 value[], unsigned int ofs)
{
    return (((const uint8_t *)value)[ofs / 8] >> (7 - ofs % 8)) & 1u;
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' must
 * be between 0 and 31, inclusive. */
static unsigned int
get_bit_at(const uint32_t prefix, unsigned int ofs)
{
    return (prefix >> (31 - ofs)) & 1u;
}

/* Create a new branch. */
static struct trie_node *
trie_branch_create(const ovs_be32 *prefix, unsigned int ofs, unsigned int plen,
                   unsigned int n_rules)
{
    struct trie_node *node = xmalloc(sizeof *node);

    node->prefix = trie_get_prefix(prefix, ofs, plen);

    if (plen <= TRIE_PREFIX_BITS) {
        node->n_bits = plen;
        ovsrcu_set_hidden(&node->edges[0], NULL);
        ovsrcu_set_hidden(&node->edges[1], NULL);
        node->n_rules = n_rules;
    } else { /* Need intermediate nodes. */
        struct trie_node *subnode = trie_branch_create(prefix,
                                                       ofs + TRIE_PREFIX_BITS,
                                                       plen - TRIE_PREFIX_BITS,
                                                       n_rules);
        int bit = get_bit_at(subnode->prefix, 0);
        node->n_bits = TRIE_PREFIX_BITS;
        ovsrcu_set_hidden(&node->edges[bit], subnode);
        ovsrcu_set_hidden(&node->edges[!bit], NULL);
        node->n_rules = 0;
    }
    return node;
}
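
/* Example (assuming TRIE_PREFIX_BITS is 32): a branch for an IPv6 /48
 * prefix becomes a 32-bit node chained to a 16-bit child; only the final
 * node in the chain carries the 'n_rules' count, intermediate nodes get
 * zero. */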

static void
trie_node_destroy(const struct trie_node *node)
{
    ovsrcu_postpone(free, CONST_CAST(struct trie_node *, node));
}

/* Copy a trie node for modification and postpone deleting the old one. */
static struct trie_node *
trie_node_rcu_realloc(const struct trie_node *node)
{
    struct trie_node *new_node = xmalloc(sizeof *node);

    *new_node = *node;
    trie_node_destroy(node);

    return new_node;
}

/* May only be called while holding the classifier mutex. */
static void
trie_destroy(rcu_trie_ptr *trie)
{
    struct trie_node *node = ovsrcu_get_protected(struct trie_node *, trie);

    if (node) {
        ovsrcu_set_hidden(trie, NULL);
        trie_destroy(&node->edges[0]);
        trie_destroy(&node->edges[1]);
        trie_node_destroy(node);
    }
}

static bool
trie_is_leaf(const struct trie_node *trie)
{
    /* No children? */
    return !ovsrcu_get(struct trie_node *, &trie->edges[0])
        && !ovsrcu_get(struct trie_node *, &trie->edges[1]);
}

static void
mask_set_prefix_bits(struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;

    for (i = 0; i < n_bits / 32; i++) {
        mask[i] = OVS_BE32_MAX;
    }
    if (n_bits % 32) {
        mask[i] |= htonl(~0u << (32 - n_bits % 32));
    }
}
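
/* Example: mask_set_prefix_bits(wc, ofs, 20) on an all-zero mask sets the
 * word at 'ofs' to htonl(0xfffff000), i.e., the mask of a /20 network. */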

static bool
mask_prefix_bits_set(const struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;
    ovs_be32 zeroes = 0;

    for (i = 0; i < n_bits / 32; i++) {
        zeroes |= ~mask[i];
    }
    if (n_bits % 32) {
        zeroes |= ~mask[i] & htonl(~0u << (32 - n_bits % 32));
    }

    return !zeroes; /* All 'n_bits' bits set. */
}
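
/* Example: if the mask word at 'be32ofs' is htonl(0xffff0000),
 * mask_prefix_bits_set(wc, be32ofs, 16) returns true, but asking for
 * 17 bits returns false, since bit 16 is not set in the mask. */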

static rcu_trie_ptr *
trie_next_edge(struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return node->edges + be_get_bit_at(value, ofs);
}

static const struct trie_node *
trie_next_node(const struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return ovsrcu_get(struct trie_node *,
                      &node->edges[be_get_bit_at(value, ofs)]);
}

/* Set the bit at ("MSB 0"-based) offset 'ofs'.  'ofs' can be greater
 * than 31. */
static void
be_set_bit_at(ovs_be32 value[], unsigned int ofs)
{
    ((uint8_t *)value)[ofs / 8] |= 1u << (7 - ofs % 8);
}

/* Returns the number of bits in the prefix mask necessary to determine a
 * mismatch, in case there are longer prefixes in the tree below the one that
 * matched.
 * '*plens' will have a bit set for each prefix length that may have matching
 * rules.  The caller is responsible for clearing '*plens' prior to calling
 * this. */
static unsigned int
trie_lookup_value(const rcu_trie_ptr *trie, const ovs_be32 value[],
                  ovs_be32 plens[], unsigned int n_bits)
{
    const struct trie_node *prev = NULL;
    const struct trie_node *node = ovsrcu_get(struct trie_node *, trie);
    unsigned int match_len = 0; /* Number of matching bits. */

    for (; node; prev = node, node = trie_next_node(node, value, match_len)) {
        unsigned int eqbits;
        /* Check if this edge can be followed. */
        eqbits = prefix_equal_bits(node->prefix, node->n_bits, value,
                                   match_len);
        match_len += eqbits;
        if (eqbits < node->n_bits) { /* Mismatch, nothing more to be found. */
            /* Bit at offset 'match_len' differed. */
            return match_len + 1; /* Includes the first mismatching bit. */
        }
        /* Full match, check if rules exist at this prefix length. */
        if (node->n_rules > 0) {
            be_set_bit_at(plens, match_len - 1);
        }
        if (match_len >= n_bits) {
            return n_bits; /* Full prefix. */
        }
    }
    /* node == NULL.  Full match so far, but we tried to follow a
     * non-existent branch.  Need to exclude the other branch if it exists
     * (it does not if we were called on an empty trie or 'prev' is a leaf
     * node). */
    return !prev || trie_is_leaf(prev) ? match_len : match_len + 1;
}
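
/* Example: with 10.0.0.0/8 and 10.1.0.0/16 in the trie, looking up
 * 10.1.2.3 sets bits 7 and 15 of 'plens' (rules exist at those prefix
 * lengths) and returns 16: matching on the first 16 bits is enough to
 * reproduce this lookup's outcome, as no longer prefix can match. */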

static unsigned int
trie_lookup(const struct cls_trie *trie, const struct flow *flow,
            union mf_value *plens)
{
    const struct mf_field *mf = trie->field;

    /* Check that current flow matches the prerequisites for the trie
     * field.  Some match fields are used for multiple purposes, so we
     * must check that the trie is relevant for this flow. */
    if (mf_are_prereqs_ok(mf, flow)) {
        return trie_lookup_value(&trie->root,
                                 &((ovs_be32 *)flow)[mf->flow_be32ofs],
                                 &plens->be32, mf->n_bits);
    }
    memset(plens, 0xff, sizeof *plens); /* All prefixes, no skipping. */
    return 0; /* Value not used in this case. */
}
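
/* Example: if 'trie' is built over an IPv6 field but 'flow' is an IPv4
 * packet, the prerequisite check fails: every bit of '*plens' is set so
 * that no prefix length is skipped, and 0 is returned (unused). */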

/* Returns the length of a prefix match mask for the field 'mf' in
 * 'minimask'.  Returns 0 if the mask is not a contiguous CIDR prefix. */
static unsigned int
minimask_get_prefix_len(const struct minimask *minimask,
                        const struct mf_field *mf)
{
    unsigned int n_bits = 0, mask_tz = 0; /* Non-zero when end of mask seen. */
    uint8_t u32_ofs = mf->flow_be32ofs;
    uint8_t u32_end = u32_ofs + mf->n_bytes / 4;

    for (; u32_ofs < u32_end; ++u32_ofs) {
        uint32_t mask;
        mask = ntohl((OVS_FORCE ovs_be32)minimask_get(minimask, u32_ofs));

        /* Validate mask, count the mask length. */
        if (mask_tz) {
            if (mask) {
                return 0; /* No bits allowed after mask ended. */
            }
        } else {
            if (~mask & (~mask + 1)) {
                return 0; /* Mask not contiguous. */
            }
            mask_tz = ctz32(mask);
            n_bits += 32 - mask_tz;
        }
    }

    return n_bits;
}
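
/* Example: an IPv4 field mask of 255.255.240.0 yields 20, while a
 * non-contiguous mask such as 255.0.255.0 yields 0 (not a CIDR prefix,
 * so a trie cannot be used for it). */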

/*
 * This is called only when mask prefix is known to be CIDR and non-zero.
 * Relies on the fact that the flow and mask have the same map, and since
 * the mask is CIDR, the storage for the flow field exists even if it
 * happened to be zeros.
 */
static const ovs_be32 *
minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
{
    return miniflow_get_be32_values(&match->flow) +
        count_1bits(match->flow.map & ((UINT64_C(1) << mf->flow_be32ofs) - 1));
}

/* Insert rule into the prefix tree.
 * 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_insert_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

static void
trie_insert_prefix(rcu_trie_ptr *edge, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    int ofs = 0;

    /* Walk the tree. */
    for (; (node = ovsrcu_get_protected(struct trie_node *, edge));
         edge = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
        ofs += eqbits;
        if (eqbits < node->n_bits) {
            /* Mismatch, new node needs to be inserted above. */
            int old_branch = get_bit_at(node->prefix, eqbits);
            struct trie_node *new_parent;

            new_parent = trie_branch_create(prefix, ofs - eqbits, eqbits,
                                            ofs == mlen ? 1 : 0);
            /* Copy the node to modify it. */
            node = trie_node_rcu_realloc(node);
            /* Adjust the new node for its new position in the tree. */
            node->prefix <<= eqbits;
            node->n_bits -= eqbits;
            ovsrcu_set_hidden(&new_parent->edges[old_branch], node);

            /* Check if a new branch is needed for the new rule. */
            if (ofs < mlen) {
                ovsrcu_set_hidden(&new_parent->edges[!old_branch],
                                  trie_branch_create(prefix, ofs, mlen - ofs,
                                                     1));
            }
            ovsrcu_set(edge, new_parent); /* Publish changes. */
            return;
        }
        /* Full match so far. */

        if (ofs == mlen) {
            /* Full match at the current node, rule needs to be added here. */
            node->n_rules++;
            return;
        }
    }
    /* Must insert a new tree branch for the new rule. */
    ovsrcu_set(edge, trie_branch_create(prefix, ofs, mlen - ofs, 1));
}
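
/* Example: inserting 10.0.0.0/8 and then 11.0.0.0/8 into an empty trie
 * splits the root: the result is a 7-bit parent holding the shared bits
 * 0000101 and two one-bit children, one per rule. */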

/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_remove_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove_prefix(rcu_trie_ptr *root, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    rcu_trie_ptr *edges[sizeof(union mf_value) * 8];
    int depth = 0, ofs = 0;

    /* Walk the tree. */
    for (edges[0] = root;
         (node = ovsrcu_get_protected(struct trie_node *, edges[depth]));
         edges[++depth] = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);

        if (eqbits < node->n_bits) {
            /* Mismatch, nothing to be removed.  This should never happen, as
             * only rules in the classifier are ever removed. */
            break; /* Log a warning. */
        }
        /* Full match so far. */
        ofs += eqbits;

        if (ofs == mlen) {
            /* Full prefix match at the current node, remove rule here. */
            if (!node->n_rules) {
                break; /* Log a warning. */
            }
            node->n_rules--;

            /* Check if the tree can be pruned. */
            while (!node->n_rules) {
                struct trie_node *next,
                    *edge0 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[0]),
                    *edge1 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[1]);

                if (edge0 && edge1) {
                    break; /* A branching point, cannot prune. */
                }

                /* Else have at most one child node, remove this node. */
                next = edge0 ? edge0 : edge1;

                if (next) {
                    if (node->n_bits + next->n_bits > TRIE_PREFIX_BITS) {
                        break; /* Cannot combine. */
                    }
                    next = trie_node_rcu_realloc(next); /* Modify. */

                    /* Combine node with next. */
                    next->prefix = node->prefix | next->prefix >> node->n_bits;
                    next->n_bits += node->n_bits;
                }
                /* Update the parent's edge. */
                ovsrcu_set(edges[depth], next); /* Publish changes. */
                trie_node_destroy(node);

                if (next || !depth) {
                    /* Branch not pruned or at root, nothing more to do. */
                    break;
                }
                node = ovsrcu_get_protected(struct trie_node *,
                                            edges[--depth]);
            }
            return;
        }
    }
    /* Cannot go deeper.  This should never happen, since only rules
     * that actually exist in the classifier are ever removed. */
    VLOG_WARN("Trying to remove a non-existent rule from a prefix trie.");
}
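
/* Example: removing 10.1.0.0/16 from a trie that also holds 10.0.0.0/8
 * frees the then-empty 16-bit leaf and leaves the /8 rule at the root;
 * the pruning loop stops there because the root still has a rule. */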