/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "classifier.h"
#include <errno.h>
#include <netinet/in.h>
#include "byte-order.h"
#include "dynamic-string.h"
#include "flow.h"
#include "hash.h"
#include "cmap.h"
#include "list.h"
#include "odp-util.h"
#include "ofp-util.h"
#include "packets.h"
#include "tag.h"
#include "util.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(classifier);

struct trie_ctx;

/* Ports trie depends on both ports sharing the same ovs_be32. */
#define TP_PORTS_OFS32 (offsetof(struct flow, tp_src) / 4)
BUILD_ASSERT_DECL(TP_PORTS_OFS32 == offsetof(struct flow, tp_dst) / 4);

/* A set of rules that all have the same fields wildcarded. */
struct cls_subtable {
    /* The fields are only used by writers and iterators. */
    struct cmap_node cmap_node; /* Within struct classifier 'subtables_map'. */

    /* The fields are only used by writers. */
    int n_rules OVS_GUARDED;                /* Number of rules, including
                                             * duplicates. */
    unsigned int max_priority OVS_GUARDED;  /* Max priority of any rule in
                                             * the subtable. */
    unsigned int max_count OVS_GUARDED;     /* Count of max_priority rules. */

    /* These fields are accessed by readers who care about wildcarding. */
    tag_type tag;       /* Tag generated from mask for partitioning (const). */
    uint8_t n_indices;                   /* How many indices to use (const). */
    uint8_t index_ofs[CLS_MAX_INDICES];  /* u32 segment boundaries (const). */
    unsigned int trie_plen[CLS_MAX_TRIES];  /* Trie prefix length in 'mask'
                                             * (runtime configurable). */
    int ports_mask_len;                     /* (const) */
    struct cmap indices[CLS_MAX_INDICES];   /* Staged lookup indices. */
    rcu_trie_ptr ports_trie;                /* NULL if none. */

    /* These fields are accessed by all readers. */
    struct cmap rules;                      /* Contains "struct cls_rule"s. */
    struct minimask mask;                   /* Wildcards for fields (const). */
    /* 'mask' must be the last field. */
};

/* Associates a metadata value (that is, a value of the OpenFlow 1.1+ metadata
 * field) with tags for the "cls_subtable"s that contain rules that match that
 * metadata value. */
struct cls_partition {
    struct cmap_node cmap_node; /* In struct classifier's 'partitions' map. */
    ovs_be64 metadata;          /* metadata value for this partition. */
    tag_type tags;              /* OR of each flow's cls_subtable tag. */
    struct tag_tracker tracker OVS_GUARDED; /* Tracks the bits in 'tags'. */
};

/* Internal representation of a rule in a "struct cls_subtable". */
struct cls_match {
    /* Accessed only by writers and iterators. */
    struct list list OVS_GUARDED; /* List of identical, lower-priority rules. */

    /* Accessed only by writers. */
    struct cls_partition *partition OVS_GUARDED;

    /* Accessed by readers interested in wildcarding. */
    unsigned int priority;      /* Larger numbers are higher priorities. */
    struct cmap_node index_nodes[CLS_MAX_INDICES]; /* Within subtable's
                                                    * 'indices'. */
    /* Accessed by all readers. */
    struct cmap_node cmap_node; /* Within struct cls_subtable 'rules'. */
    struct cls_rule *cls_rule;
    struct miniflow flow;       /* Matching rule. Mask is in the subtable. */
    /* 'flow' must be the last field. */
};

static struct cls_match *
cls_match_alloc(struct cls_rule *rule)
{
    int count = count_1bits(rule->match.flow.map);

    struct cls_match *cls_match
        = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
                  + MINIFLOW_VALUES_SIZE(count));

    cls_match->cls_rule = rule;
    miniflow_clone_inline(&cls_match->flow, &rule->match.flow, count);
    cls_match->priority = rule->priority;
    rule->cls_match = cls_match;

    return cls_match;
}
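
/* Note on the allocation in cls_match_alloc() above: 'flow' is the last
 * member of struct cls_match, so the xmalloc() reserves exactly enough
 * room for the miniflow's inline values by subtracting the nominal size
 * of 'flow.inline_values' and adding back the space actually needed for
 * 'count' values.  This is why the struct comments insist that 'flow'
 * (and, in struct cls_subtable, 'mask') must be the last field. */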

static struct cls_subtable *find_subtable(const struct classifier *cls,
                                          const struct minimask *)
    OVS_REQUIRES(cls->mutex);
static struct cls_subtable *insert_subtable(struct classifier *cls,
                                            const struct minimask *)
    OVS_REQUIRES(cls->mutex);
static void destroy_subtable(struct classifier *cls, struct cls_subtable *)
    OVS_REQUIRES(cls->mutex);
static struct cls_match *insert_rule(struct classifier *cls,
                                     struct cls_subtable *, struct cls_rule *)
    OVS_REQUIRES(cls->mutex);

static struct cls_match *find_match_wc(const struct cls_subtable *,
                                       const struct flow *, struct trie_ctx *,
                                       unsigned int n_tries,
                                       struct flow_wildcards *);
static struct cls_match *find_equal(struct cls_subtable *,
                                    const struct miniflow *, uint32_t hash);

/* Iterates RULE over HEAD and all of the cls_rules on HEAD->list.
 * Classifier's mutex must be held while iterating, as the list is
 * protected by it. */
#define FOR_EACH_RULE_IN_LIST(RULE, HEAD)                               \
    for ((RULE) = (HEAD); (RULE) != NULL; (RULE) = next_rule_in_list(RULE))
#define FOR_EACH_RULE_IN_LIST_SAFE(RULE, NEXT, HEAD)                    \
    for ((RULE) = (HEAD);                                               \
         (RULE) != NULL && ((NEXT) = next_rule_in_list(RULE), true);    \
         (RULE) = (NEXT))

static struct cls_match *next_rule_in_list__(struct cls_match *);
static struct cls_match *next_rule_in_list(struct cls_match *);

static unsigned int minimask_get_prefix_len(const struct minimask *,
                                            const struct mf_field *);
static void trie_init(struct classifier *cls, int trie_idx,
                      const struct mf_field *)
    OVS_REQUIRES(cls->mutex);
static unsigned int trie_lookup(const struct cls_trie *, const struct flow *,
                                union mf_value *plens);
static unsigned int trie_lookup_value(const rcu_trie_ptr *,
                                      const ovs_be32 value[], ovs_be32 plens[],
                                      unsigned int value_bits);
static void trie_destroy(rcu_trie_ptr *);
static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen);
static void trie_insert_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
                               int mlen);
static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen);
static void trie_remove_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
                               int mlen);
static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs,
                                 unsigned int n_bits);
static bool mask_prefix_bits_set(const struct flow_wildcards *,
                                 uint8_t be32ofs, unsigned int n_bits);

/* flow/miniflow/minimask/minimatch utilities.
 * These are only used by the classifier, so place them here to allow
 * for better optimization. */

static inline uint64_t
miniflow_get_map_in_range(const struct miniflow *miniflow,
                          uint8_t start, uint8_t end, unsigned int *offset)
{
    uint64_t map = miniflow->map;
    *offset = 0;

    if (start > 0) {
        uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
        *offset = count_1bits(map & msk);
        map &= ~msk;
    }
    if (end < FLOW_U32S) {
        uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
        map &= msk;
    }
    return map;
}
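
/* Worked example for the function above (values chosen for
 * illustration): with 'miniflow->map' = 0b101101, start = 2 and
 * end = 5, the low mask clears bit 0, so '*offset' becomes 1 (one
 * miniflow value is stored for that bit), and the high mask clears
 * bit 5, leaving a returned map of 0b001100: only the u32s for flow
 * words 2 and 3 remain in the range. */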

/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * miniflow_hash_in_minimask(), only the form of the arguments differs. */
static inline uint32_t
flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
                      uint32_t basis)
{
    const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
    const uint32_t *flow_u32 = (const uint32_t *)flow;
    const uint32_t *p = mask_values;
    uint32_t hash;
    uint64_t map;

    hash = basis;
    for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
        hash = hash_add(hash, flow_u32[raw_ctz(map)] & *p++);
    }

    return hash_finish(hash, (p - mask_values) * 4);
}

/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * flow_hash_in_minimask(), only the form of the arguments differs. */
static inline uint32_t
miniflow_hash_in_minimask(const struct miniflow *flow,
                          const struct minimask *mask, uint32_t basis)
{
    const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
    const uint32_t *p = mask_values;
    uint32_t hash = basis;
    uint32_t flow_u32;

    MINIFLOW_FOR_EACH_IN_MAP(flow_u32, flow, mask->masks.map) {
        hash = hash_add(hash, flow_u32 & *p++);
    }

    return hash_finish(hash, (p - mask_values) * 4);
}

/* Returns a hash value for the bits of range [start, end) in 'flow',
 * where there are 1-bits in 'mask', given '*basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * minimatch_hash_range(), only the form of the arguments differs. */
static inline uint32_t
flow_hash_in_minimask_range(const struct flow *flow,
                            const struct minimask *mask,
                            uint8_t start, uint8_t end, uint32_t *basis)
{
    const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
    const uint32_t *flow_u32 = (const uint32_t *)flow;
    unsigned int offset;
    uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
                                             &offset);
    const uint32_t *p = mask_values + offset;
    uint32_t hash = *basis;

    for (; map; map = zero_rightmost_1bit(map)) {
        hash = hash_add(hash, flow_u32[raw_ctz(map)] & *p++);
    }

    *basis = hash; /* Allow continuation from the unfinished value. */
    return hash_finish(hash, (p - mask_values) * 4);
}

/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
static inline void
flow_wildcards_fold_minimask(struct flow_wildcards *wc,
                             const struct minimask *mask)
{
    flow_union_with_miniflow(&wc->masks, &mask->masks);
}

/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
 * in range [start, end). */
static inline void
flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
                                   const struct minimask *mask,
                                   uint8_t start, uint8_t end)
{
    uint32_t *dst_u32 = (uint32_t *)&wc->masks;
    unsigned int offset;
    uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
                                             &offset);
    const uint32_t *p = miniflow_get_u32_values(&mask->masks) + offset;

    for (; map; map = zero_rightmost_1bit(map)) {
        dst_u32[raw_ctz(map)] |= *p++;
    }
}

/* Returns a hash value for 'flow', given 'basis'. */
static inline uint32_t
miniflow_hash(const struct miniflow *flow, uint32_t basis)
{
    const uint32_t *values = miniflow_get_u32_values(flow);
    const uint32_t *p = values;
    uint32_t hash = basis;
    uint64_t hash_map = 0;
    uint64_t map;

    for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
        if (*p) {
            hash = hash_add(hash, *p);
            hash_map |= rightmost_1bit(map);
        }
        p++;
    }
    hash = hash_add(hash, hash_map);
    hash = hash_add(hash, hash_map >> 32);

    return hash_finish(hash, p - values);
}

/* Returns a hash value for 'mask', given 'basis'. */
static inline uint32_t
minimask_hash(const struct minimask *mask, uint32_t basis)
{
    return miniflow_hash(&mask->masks, basis);
}

/* Returns a hash value for 'match', given 'basis'. */
static inline uint32_t
minimatch_hash(const struct minimatch *match, uint32_t basis)
{
    return miniflow_hash(&match->flow, minimask_hash(&match->mask, basis));
}

/* Returns a hash value for the bits of range [start, end) in 'minimatch',
 * given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * flow_hash_in_minimask_range(), only the form of the arguments differs. */
static inline uint32_t
minimatch_hash_range(const struct minimatch *match, uint8_t start, uint8_t end,
                     uint32_t *basis)
{
    unsigned int offset;
    const uint32_t *p, *q;
    uint32_t hash = *basis;
    int n, i;

    n = count_1bits(miniflow_get_map_in_range(&match->mask.masks, start, end,
                                              &offset));
    q = miniflow_get_u32_values(&match->mask.masks) + offset;
    p = miniflow_get_u32_values(&match->flow) + offset;

    for (i = 0; i < n; i++) {
        hash = hash_add(hash, p[i] & q[i]);
    }
    *basis = hash; /* Allow continuation from the unfinished value. */
    return hash_finish(hash, (offset + n) * 4);
}
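
/* The '*basis' in/out parameter above is what makes staged lookups
 * work: hashing ranges [0, a), [a, b), ... in sequence with the same
 * 'basis' yields, at each stage, the same finished hash value as a
 * single call covering the combined prefix range, because the
 * unfinished hash accumulates across stages and the finish length
 * counts all values from the start of the map.  insert_rule() and
 * classifier_remove() rely on this to compute each staged index hash
 * and the final 'rules' cmap hash in one pass over the minimatch. */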

/* cls_rule. */

/* Initializes 'rule' to match packets specified by 'match' at the given
 * 'priority'.  'match' must satisfy the invariant described in the comment at
 * the definition of struct match.
 *
 * The caller must eventually destroy 'rule' with cls_rule_destroy().
 *
 * (OpenFlow uses priorities between 0 and UINT16_MAX, inclusive, but
 * internally Open vSwitch supports a wider range.) */
void
cls_rule_init(struct cls_rule *rule,
              const struct match *match, unsigned int priority)
{
    minimatch_init(&rule->match, match);
    rule->priority = priority;
    rule->cls_match = NULL;
}

/* Same as cls_rule_init() for initialization from a "struct minimatch". */
void
cls_rule_init_from_minimatch(struct cls_rule *rule,
                             const struct minimatch *match,
                             unsigned int priority)
{
    minimatch_clone(&rule->match, match);
    rule->priority = priority;
    rule->cls_match = NULL;
}

/* Initializes 'dst' as a copy of 'src'.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_clone(struct cls_rule *dst, const struct cls_rule *src)
{
    minimatch_clone(&dst->match, &src->match);
    dst->priority = src->priority;
    dst->cls_match = NULL;
}

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_move(struct cls_rule *dst, struct cls_rule *src)
{
    minimatch_move(&dst->match, &src->match);
    dst->priority = src->priority;
    dst->cls_match = NULL;
}

/* Frees memory referenced by 'rule'.  Doesn't free 'rule' itself (it's
 * normally embedded into a larger structure).
 *
 * ('rule' must not currently be in a classifier.) */
void
cls_rule_destroy(struct cls_rule *rule)
{
    ovs_assert(!rule->cls_match);
    minimatch_destroy(&rule->match);
}

/* Returns true if 'a' and 'b' match the same packets at the same priority,
 * false if they differ in some way. */
bool
cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b)
{
    return a->priority == b->priority && minimatch_equal(&a->match, &b->match);
}

/* Returns a hash value for 'rule', folding in 'basis'. */
uint32_t
cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
{
    return minimatch_hash(&rule->match, hash_int(rule->priority, basis));
}

/* Appends a string describing 'rule' to 's'. */
void
cls_rule_format(const struct cls_rule *rule, struct ds *s)
{
    minimatch_format(&rule->match, s, rule->priority);
}

/* Returns true if 'rule' matches every packet, false otherwise. */
bool
cls_rule_is_catchall(const struct cls_rule *rule)
{
    return minimask_is_catchall(&rule->match.mask);
}

/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
void
classifier_init(struct classifier *cls, const uint8_t *flow_segments)
    OVS_EXCLUDED(cls->mutex)
{
    ovs_mutex_init(&cls->mutex);
    ovs_mutex_lock(&cls->mutex);
    cls->n_rules = 0;
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
    cmap_init(&cls->partitions);
    cls->n_flow_segments = 0;
    if (flow_segments) {
        while (cls->n_flow_segments < CLS_MAX_INDICES
               && *flow_segments < FLOW_U32S) {
            cls->flow_segments[cls->n_flow_segments++] = *flow_segments++;
        }
    }
    cls->n_tries = 0;
    for (int i = 0; i < CLS_MAX_TRIES; i++) {
        trie_init(cls, i, NULL);
    }
    ovs_mutex_unlock(&cls->mutex);
}

/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
void
classifier_destroy(struct classifier *cls)
    OVS_EXCLUDED(cls->mutex)
{
    if (cls) {
        struct cls_partition *partition, *next_partition;
        struct cls_subtable *subtable, *next_subtable;
        int i;

        ovs_mutex_lock(&cls->mutex);
        for (i = 0; i < cls->n_tries; i++) {
            trie_destroy(&cls->tries[i].root);
        }

        CMAP_FOR_EACH_SAFE (subtable, next_subtable, cmap_node,
                            &cls->subtables_map) {
            destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);

        CMAP_FOR_EACH_SAFE (partition, next_partition, cmap_node,
                            &cls->partitions) {
            ovsrcu_postpone(free, partition);
        }
        cmap_destroy(&cls->partitions);

        pvector_destroy(&cls->subtables);
        ovs_mutex_unlock(&cls->mutex);
        ovs_mutex_destroy(&cls->mutex);
    }
}

/* We use uint64_t as a set for the fields below. */
BUILD_ASSERT_DECL(MFF_N_IDS <= 64);

/* Set the fields for which prefix lookup should be performed. */
bool
classifier_set_prefix_fields(struct classifier *cls,
                             const enum mf_field_id *trie_fields,
                             unsigned int n_fields)
    OVS_EXCLUDED(cls->mutex)
{
    uint64_t fields = 0;
    const struct mf_field *new_fields[CLS_MAX_TRIES];
    int i, n_tries = 0;
    bool changed = false;

    ovs_mutex_lock(&cls->mutex);
    for (i = 0; i < n_fields && n_tries < CLS_MAX_TRIES; i++) {
        const struct mf_field *field = mf_from_id(trie_fields[i]);
        if (field->flow_be32ofs < 0 || field->n_bits % 32) {
            /* Incompatible field.  This is the only place where we
             * enforce these requirements, but the rest of the trie code
             * depends on the flow_be32ofs to be non-negative and the
             * field length to be a multiple of 32 bits. */
            continue;
        }

        if (fields & (UINT64_C(1) << trie_fields[i])) {
            /* Duplicate field, there is no need to build more than
             * one index for any one field. */
            continue;
        }
        fields |= UINT64_C(1) << trie_fields[i];

        new_fields[n_tries] = NULL;
        if (n_tries >= cls->n_tries || field != cls->tries[n_tries].field) {
            new_fields[n_tries] = field;
            changed = true;
        }
        n_tries++;
    }

    if (changed || n_tries < cls->n_tries) {
        struct cls_subtable *subtable;

        /* Trie configuration needs to change.  Disable trie lookups
         * for the tries that are changing and wait for all the current
         * readers with the old configuration to be done. */
        changed = false;
        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            for (i = 0; i < cls->n_tries; i++) {
                if ((i < n_tries && new_fields[i]) || i >= n_tries) {
                    if (subtable->trie_plen[i]) {
                        subtable->trie_plen[i] = 0;
                        changed = true;
                    }
                }
            }
        }
        /* Synchronize if any readers were using tries.  The readers may
         * temporarily function without the trie lookup based optimizations. */
        if (changed) {
            /* ovsrcu_synchronize() functions as a memory barrier, so it does
             * not matter that subtable->trie_plen is not atomic. */
            ovsrcu_synchronize();
        }

        /* Now set up the tries. */
        for (i = 0; i < n_tries; i++) {
            if (new_fields[i]) {
                trie_init(cls, i, new_fields[i]);
            }
        }
        /* Destroy the rest, if any. */
        for (; i < cls->n_tries; i++) {
            trie_init(cls, i, NULL);
        }

        cls->n_tries = n_tries;
        ovs_mutex_unlock(&cls->mutex);
        return true;
    }

    ovs_mutex_unlock(&cls->mutex);
    return false; /* No change. */
}
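
/* Usage sketch (illustrative, not part of this file): enable prefix
 * tries for longest-prefix matching on the IPv4 source and destination
 * address fields.
 *
 *     const enum mf_field_id fields[2] = { MFF_IPV4_SRC, MFF_IPV4_DST };
 *
 *     if (classifier_set_prefix_fields(&cls, fields, ARRAY_SIZE(fields))) {
 *         VLOG_INFO("prefix trie configuration changed");
 *     }
 */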

static void
trie_init(struct classifier *cls, int trie_idx, const struct mf_field *field)
    OVS_REQUIRES(cls->mutex)
{
    struct cls_trie *trie = &cls->tries[trie_idx];
    struct cls_subtable *subtable;

    if (trie_idx < cls->n_tries) {
        trie_destroy(&trie->root);
    } else {
        ovsrcu_set_hidden(&trie->root, NULL);
    }
    trie->field = field;

    /* Add existing rules to the new trie. */
    CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
        unsigned int plen;

        plen = field ? minimask_get_prefix_len(&subtable->mask, field) : 0;
        if (plen) {
            struct cls_match *head;

            CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
                struct cls_match *match;

                FOR_EACH_RULE_IN_LIST (match, head) {
                    trie_insert(trie, match->cls_rule, plen);
                }
            }
        }
        /* Initialize subtable's prefix length on this field.  This will
         * allow readers to use the trie. */
        atomic_thread_fence(memory_order_release);
        subtable->trie_plen[trie_idx] = plen;
    }
}

/* Returns true if 'cls' contains no classification rules, false otherwise.
 * Checking the cmap requires no locking. */
bool
classifier_is_empty(const struct classifier *cls)
{
    return cmap_is_empty(&cls->subtables_map);
}

/* Returns the number of rules in 'cls'. */
int
classifier_count(const struct classifier *cls)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* n_rules is an int, so in the presence of concurrent writers this will
     * return either the old or a new value. */
    return cls->n_rules;
}

static uint32_t
hash_metadata(ovs_be64 metadata_)
{
    uint64_t metadata = (OVS_FORCE uint64_t) metadata_;
    return hash_uint64(metadata);
}

static struct cls_partition *
find_partition(const struct classifier *cls, ovs_be64 metadata, uint32_t hash)
{
    struct cls_partition *partition;

    CMAP_FOR_EACH_WITH_HASH (partition, cmap_node, hash, &cls->partitions) {
        if (partition->metadata == metadata) {
            return partition;
        }
    }

    return NULL;
}

static struct cls_partition *
create_partition(struct classifier *cls, struct cls_subtable *subtable,
                 ovs_be64 metadata)
    OVS_REQUIRES(cls->mutex)
{
    uint32_t hash = hash_metadata(metadata);
    struct cls_partition *partition = find_partition(cls, metadata, hash);
    if (!partition) {
        partition = xmalloc(sizeof *partition);
        partition->metadata = metadata;
        partition->tags = 0;
        tag_tracker_init(&partition->tracker);
        cmap_insert(&cls->partitions, &partition->cmap_node, hash);
    }
    tag_tracker_add(&partition->tracker, &partition->tags, subtable->tag);
    return partition;
}

static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
{
    /* Could optimize to use the same map if needed for fast path. */
    return MINIFLOW_GET_BE32(&match->flow, tp_src)
        & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
}
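
/* Note: tp_src and tp_dst are 16-bit fields that occupy a single 32-bit
 * word of struct flow (see the TP_PORTS_OFS32 assertion near the top of
 * this file), so reading the ovs_be32 at 'tp_src' above yields both
 * ports at once.  That is what lets a single ports trie cover the
 * source and destination port pair. */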

/* Inserts 'rule' into 'cls'.  Until 'rule' is removed from 'cls', the caller
 * must not modify or free it.
 *
 * If 'cls' already contains an identical rule (including wildcards, values of
 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
 * rule that was replaced.  The caller takes ownership of the returned rule and
 * is thus responsible for destroying it with cls_rule_destroy(), freeing the
 * memory block in which it resides, etc., as necessary.
 *
 * Returns NULL if 'cls' does not contain a rule with an identical key, after
 * inserting the new rule.  In this case, no rules are displaced by the new
 * rule, even rules that cannot have any effect because the new rule matches a
 * superset of their flows and has higher priority. */
struct cls_rule *
classifier_replace(struct classifier *cls, struct cls_rule *rule)
    OVS_EXCLUDED(cls->mutex)
{
    struct cls_match *old_rule;
    struct cls_subtable *subtable;
    struct cls_rule *old_cls_rule = NULL;

    ovs_mutex_lock(&cls->mutex);
    subtable = find_subtable(cls, &rule->match.mask);
    if (!subtable) {
        subtable = insert_subtable(cls, &rule->match.mask);
    }

    old_rule = insert_rule(cls, subtable, rule);
    if (!old_rule) {
        old_cls_rule = NULL;

        rule->cls_match->partition = NULL;
        if (minimask_get_metadata_mask(&rule->match.mask) == OVS_BE64_MAX) {
            ovs_be64 metadata = miniflow_get_metadata(&rule->match.flow);
            rule->cls_match->partition = create_partition(cls, subtable,
                                                          metadata);
        }

        cls->n_rules++;

        for (int i = 0; i < cls->n_tries; i++) {
            if (subtable->trie_plen[i]) {
                trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]);
            }
        }

        /* Ports trie. */
        if (subtable->ports_mask_len) {
            /* We mask the value to be inserted to always have the wildcarded
             * bits in known (zero) state, so we can include them in comparison
             * and they will always match (== their original value does not
             * matter). */
            ovs_be32 masked_ports = minimatch_get_ports(&rule->match);

            trie_insert_prefix(&subtable->ports_trie, &masked_ports,
                               subtable->ports_mask_len);
        }
    } else {
        old_cls_rule = old_rule->cls_rule;
        rule->cls_match->partition = old_rule->partition;
        old_cls_rule->cls_match = NULL;

        /* 'old_rule' contains a cmap_node, which may not be freed
         * immediately. */
        ovsrcu_postpone(free, old_rule);
    }
    ovs_mutex_unlock(&cls->mutex);
    return old_cls_rule;
}

/* Inserts 'rule' into 'cls'.  Until 'rule' is removed from 'cls', the caller
 * must not modify or free it.
 *
 * 'cls' must not contain an identical rule (including wildcards, values of
 * fixed fields, and priority).  Use classifier_find_rule_exactly() to find
 * such a rule. */
void
classifier_insert(struct classifier *cls, struct cls_rule *rule)
{
    struct cls_rule *displaced_rule = classifier_replace(cls, rule);
    ovs_assert(!displaced_rule);
}
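
/* Usage sketch (illustrative, not part of this file): the typical rule
 * lifecycle with the functions above.
 *
 *     struct cls_rule rule;
 *
 *     cls_rule_init(&rule, &match, priority); // 'match' built elsewhere.
 *     classifier_insert(&cls, &rule);         // 'rule' now owned by 'cls'.
 *     ...
 *     classifier_remove(&cls, &rule);
 *     cls_rule_destroy(&rule);                // Only after removal.
 */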

/* Removes 'rule' from 'cls'.  It is the caller's responsibility to destroy
 * 'rule' with cls_rule_destroy(), freeing the memory block in which 'rule'
 * resides, etc., as necessary. */
void
classifier_remove(struct classifier *cls, struct cls_rule *rule)
    OVS_EXCLUDED(cls->mutex)
{
    struct cls_partition *partition;
    struct cls_match *cls_match = rule->cls_match;
    struct cls_match *head;
    struct cls_subtable *subtable;
    int i;
    uint32_t basis = 0, hash, ihash[CLS_MAX_INDICES];
    uint8_t prev_be32ofs = 0;

    ovs_assert(cls_match);

    ovs_mutex_lock(&cls->mutex);
    subtable = find_subtable(cls, &rule->match.mask);
    ovs_assert(subtable);

    if (subtable->ports_mask_len) {
        ovs_be32 masked_ports = minimatch_get_ports(&rule->match);

        trie_remove_prefix(&subtable->ports_trie,
                           &masked_ports, subtable->ports_mask_len);
    }
    for (i = 0; i < cls->n_tries; i++) {
        if (subtable->trie_plen[i]) {
            trie_remove(&cls->tries[i], rule, subtable->trie_plen[i]);
        }
    }

    /* Remove rule node from indices. */
    for (i = 0; i < subtable->n_indices; i++) {
        ihash[i] = minimatch_hash_range(&rule->match, prev_be32ofs,
                                        subtable->index_ofs[i], &basis);
        cmap_remove(&subtable->indices[i], &cls_match->index_nodes[i],
                    ihash[i]);
        prev_be32ofs = subtable->index_ofs[i];
    }
    hash = minimatch_hash_range(&rule->match, prev_be32ofs, FLOW_U32S, &basis);

    head = find_equal(subtable, &rule->match.flow, hash);
    if (head != cls_match) {
        list_remove(&cls_match->list);
    } else if (list_is_empty(&cls_match->list)) {
        cmap_remove(&subtable->rules, &cls_match->cmap_node, hash);
    } else {
        struct cls_match *next = CONTAINER_OF(cls_match->list.next,
                                              struct cls_match, list);

        list_remove(&cls_match->list);
        cmap_replace(&subtable->rules, &cls_match->cmap_node,
                     &next->cmap_node, hash);
    }

    partition = cls_match->partition;
    if (partition) {
        tag_tracker_subtract(&partition->tracker, &partition->tags,
                             subtable->tag);
        if (!partition->tags) {
            cmap_remove(&cls->partitions, &partition->cmap_node,
                        hash_metadata(partition->metadata));
            ovsrcu_postpone(free, partition);
        }
    }

    if (--subtable->n_rules == 0) {
        destroy_subtable(cls, subtable);
    } else if (subtable->max_priority == cls_match->priority
               && --subtable->max_count == 0) {
        /* Find the new 'max_priority' and 'max_count'. */
        struct cls_match *head;
        unsigned int max_priority = 0;

        CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
            if (head->priority > max_priority) {
                max_priority = head->priority;
                subtable->max_count = 1;
            } else if (head->priority == max_priority) {
                ++subtable->max_count;
            }
        }
        subtable->max_priority = max_priority;
        pvector_change_priority(&cls->subtables, subtable, max_priority);
    }

    cls->n_rules--;

    rule->cls_match = NULL;
    ovsrcu_postpone(free, cls_match);
    ovs_mutex_unlock(&cls->mutex);
}

/* Prefix tree context.  Valid when 'lookup_done' is true.  Can skip all
 * subtables which have a prefix match on the trie field, but whose prefix
 * length is not indicated in 'match_plens'.  For example, a subtable that
 * has an 8-bit trie field prefix match can be skipped if
 * !be_get_bit_at(&match_plens, 8 - 1).  If skipped, 'maskbits' prefix bits
 * must be unwildcarded to make the datapath flow match only the packets it
 * should. */
struct trie_ctx {
    const struct cls_trie *trie;
    bool lookup_done;        /* Status of the lookup. */
    uint8_t be32ofs;         /* U32 offset of the field in question. */
    unsigned int maskbits;   /* Prefix length needed to avoid false matches. */
    union mf_value match_plens; /* Bitmask of prefix lengths with possible
                                 * matches. */
};

static void
trie_ctx_init(struct trie_ctx *ctx, const struct cls_trie *trie)
{
    ctx->trie = trie;
    ctx->be32ofs = trie->field->flow_be32ofs;
    ctx->lookup_done = false;
}

/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
 * Returns a null pointer if no rules in 'cls' match 'flow'.  If multiple rules
 * of equal priority match 'flow', returns one arbitrarily.
 *
 * If a rule is found and 'wc' is non-null, bitwise-OR's 'wc' with the
 * set of bits that were significant in the lookup.  At some point
 * earlier, 'wc' should have been initialized (e.g., by
 * flow_wildcards_init_catchall()). */
struct cls_rule *
classifier_lookup(const struct classifier *cls, const struct flow *flow,
                  struct flow_wildcards *wc)
{
    const struct cls_partition *partition;
    tag_type tags;
    int64_t best_priority = -1;
    const struct cls_match *best;
    struct trie_ctx trie_ctx[CLS_MAX_TRIES];
    struct cls_subtable *subtable;

    /* Synchronize for cls->n_tries and subtable->trie_plen.  They can change
     * when table configuration changes, which happens typically only on
     * startup. */
    atomic_thread_fence(memory_order_acquire);

    /* Determine 'tags' such that, if 'subtable->tag' doesn't intersect them,
     * then 'flow' cannot possibly match in 'subtable':
     *
     *     - If flow->metadata maps to a given 'partition', then we can use
     *       'partition->tags' for 'tags'.
     *
     *     - If flow->metadata has no partition, then no rule in 'cls' has an
     *       exact-match for flow->metadata.  That means that we don't need to
     *       search any subtable that includes flow->metadata in its mask.
     *
     * In either case, we always need to search any cls_subtables that do not
     * include flow->metadata in their masks.  One way to do that would be to
     * check the "cls_subtable"s explicitly for that, but that would require an
     * extra branch per subtable.  Instead, we mark such a cls_subtable's
     * 'tags' as TAG_ALL and make sure that 'tags' is never empty.  This means
     * that 'tags' always intersects such a cls_subtable's 'tags', so we don't
     * need a special case.
     */
    partition = (cmap_is_empty(&cls->partitions)
                 ? NULL
                 : find_partition(cls, flow->metadata,
                                  hash_metadata(flow->metadata)));
    tags = partition ? partition->tags : TAG_ARBITRARY;

    /* Initialize trie contexts for find_match_wc(). */
    for (int i = 0; i < cls->n_tries; i++) {
        trie_ctx_init(&trie_ctx[i], &cls->tries[i]);
    }

    best = NULL;
    PVECTOR_FOR_EACH_PRIORITY(subtable, best_priority, 2,
                              sizeof(struct cls_subtable), &cls->subtables) {
        struct cls_match *rule;

        if (!tag_intersects(tags, subtable->tag)) {
            continue;
        }

        rule = find_match_wc(subtable, flow, trie_ctx, cls->n_tries, wc);
        if (rule && (int64_t)rule->priority > best_priority) {
            best_priority = (int64_t)rule->priority;
            best = rule;
        }
    }

    return best ? best->cls_rule : NULL;
}
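
/* Usage sketch (illustrative, not part of this file): a lookup that also
 * collects the wildcards, e.g. for building a datapath megaflow entry.
 *
 *     struct flow_wildcards wc;
 *     const struct cls_rule *rule;
 *
 *     flow_wildcards_init_catchall(&wc);
 *     rule = classifier_lookup(&cls, &flow, &wc);
 *     // On return, 'wc.masks' has a 1-bit for every flow bit that was
 *     // significant in selecting (or rejecting) rules for 'flow'.
 */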

/* Returns true if 'target' satisfies 'flow'/'mask', that is, if each bit
 * for which 'mask' has a 1-bit, 'flow' and 'target' have the same value.
 *
 * 'flow' and 'mask' have the same map! */
static bool
miniflow_and_mask_matches_miniflow(const struct miniflow *flow,
                                   const struct minimask *mask,
                                   const struct miniflow *target)
{
    const uint32_t *flowp = miniflow_get_u32_values(flow);
    const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
    uint32_t target_u32;

    MINIFLOW_FOR_EACH_IN_MAP(target_u32, target, mask->masks.map) {
        if ((*flowp++ ^ target_u32) & *maskp++) {
            return false;
        }
    }

    return true;
}

static inline struct cls_match *
find_match_miniflow(const struct cls_subtable *subtable,
                    const struct miniflow *flow,
                    uint32_t hash)
{
    struct cls_match *rule;

    CMAP_FOR_EACH_WITH_HASH (rule, cmap_node, hash, &subtable->rules) {
        if (miniflow_and_mask_matches_miniflow(&rule->flow, &subtable->mask,
                                               flow)) {
            return rule;
        }
    }

    return NULL;
}

/* For each miniflow in 'flows' performs a classifier lookup writing the result
 * into the corresponding slot in 'rules'.  If a particular entry in 'flows' is
 * NULL it is skipped.
 *
 * This function is optimized for use in the userspace datapath and therefore
 * does not implement a lot of features available in the standard
 * classifier_lookup() function.  Specifically, it does not implement
 * priorities, instead returning any rule which matches the flow. */
void
classifier_lookup_miniflow_batch(const struct classifier *cls,
                                 const struct miniflow **flows,
                                 struct cls_rule **rules, size_t len)
{
    struct cls_subtable *subtable;
    size_t i, begin = 0;

    memset(rules, 0, len * sizeof *rules);
    PVECTOR_FOR_EACH (subtable, &cls->subtables) {
        for (i = begin; i < len; i++) {
            struct cls_match *match;
            uint32_t hash;

            if (OVS_UNLIKELY(rules[i] || !flows[i])) {
                continue;
            }

            hash = miniflow_hash_in_minimask(flows[i], &subtable->mask, 0);
            match = find_match_miniflow(subtable, flows[i], hash);
            if (OVS_UNLIKELY(match)) {
                rules[i] = match->cls_rule;
            }
        }

        while (begin < len && (rules[begin] || !flows[begin])) {
            begin++;
        }
        if (begin >= len) {
            break;
        }
    }
}
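
/* Note on the loop structure above: iterating subtable-major (all flows
 * against one subtable before moving to the next) keeps each subtable's
 * mask and cmap hot in cache across the whole batch.  'begin' tracks the
 * first still-unresolved flow so already-resolved prefixes of the batch
 * are never rescanned, and the function returns early once every flow
 * has either matched or been skipped. */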

/* Finds and returns a rule in 'cls' with exactly the same priority and
 * matching criteria as 'target'.  Returns a null pointer if 'cls' doesn't
 * contain an exact match. */
struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
                             const struct cls_rule *target)
    OVS_EXCLUDED(cls->mutex)
{
    struct cls_match *head, *rule;
    struct cls_subtable *subtable;

    ovs_mutex_lock(&cls->mutex);
    subtable = find_subtable(cls, &target->match.mask);
    if (!subtable) {
        goto out;
    }

    /* Skip if there is no hope. */
    if (target->priority > subtable->max_priority) {
        goto out;
    }

    head = find_equal(subtable, &target->match.flow,
                      miniflow_hash_in_minimask(&target->match.flow,
                                                &target->match.mask, 0));
    FOR_EACH_RULE_IN_LIST (rule, head) {
        if (target->priority >= rule->priority) {
            ovs_mutex_unlock(&cls->mutex);
            return target->priority == rule->priority ? rule->cls_rule : NULL;
        }
    }
out:
    ovs_mutex_unlock(&cls->mutex);
    return NULL;
}

/* Finds and returns a rule in 'cls' with priority 'priority' and exactly the
 * same matching criteria as 'target'.  Returns a null pointer if 'cls' doesn't
 * contain an exact match. */
struct cls_rule *
classifier_find_match_exactly(const struct classifier *cls,
                              const struct match *target,
                              unsigned int priority)
{
    struct cls_rule *retval;
    struct cls_rule cr;

    cls_rule_init(&cr, target, priority);
    retval = classifier_find_rule_exactly(cls, &cr);
    cls_rule_destroy(&cr);

    return retval;
}

/* Checks if 'target' would overlap any other rule in 'cls'.  Two rules are
 * considered to overlap if both rules have the same priority and a packet
 * could match both. */
bool
classifier_rule_overlaps(const struct classifier *cls,
                         const struct cls_rule *target)
    OVS_EXCLUDED(cls->mutex)
{
    struct cls_subtable *subtable;
    int64_t stop_at_priority = (int64_t)target->priority - 1;

    ovs_mutex_lock(&cls->mutex);
    /* Iterate subtables in the descending max priority order. */
    PVECTOR_FOR_EACH_PRIORITY (subtable, stop_at_priority, 2,
                               sizeof(struct cls_subtable), &cls->subtables) {
        uint32_t storage[FLOW_U32S];
        struct minimask mask;
        struct cls_match *head;

        minimask_combine(&mask, &target->match.mask, &subtable->mask, storage);
        CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
            struct cls_match *rule;

            FOR_EACH_RULE_IN_LIST (rule, head) {
                if (rule->priority < target->priority) {
                    break; /* Rules in descending priority order. */
                }
                if (rule->priority == target->priority
                    && miniflow_equal_in_minimask(&target->match.flow,
                                                  &rule->flow, &mask)) {
                    ovs_mutex_unlock(&cls->mutex);
                    return true;
                }
            }
        }
    }

    ovs_mutex_unlock(&cls->mutex);
    return false;
}

/* Returns true if 'rule' exactly matches 'criteria' or if 'rule' is more
 * specific than 'criteria'.  That is, 'rule' matches 'criteria' and this
 * function returns true if, for every field:
 *
 *   - 'criteria' and 'rule' specify the same (non-wildcarded) value for the
 *     field, or
 *
 *   - 'criteria' wildcards the field,
 *
 * Conversely, 'rule' does not match 'criteria' and this function returns false
 * if, for at least one field:
 *
 *   - 'criteria' and 'rule' specify different values for the field, or
 *
 *   - 'criteria' specifies a value for the field but 'rule' wildcards it.
 *
 * Equivalently, the truth table for whether a field matches is:
 *
 *                              rule
 *                     wildcard      exact
 *                    +----------+-----------+
 *           wildcard |   yes    |    yes    |
 * criteria           +----------+-----------+
 *           exact    |   no     | if values |
 *                    |          | are equal |
 *                    +----------+-----------+
 *
 * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
 * commands and by OpenFlow 1.0 aggregate and flow stats.
 *
 * Ignores rule->priority. */
bool
cls_rule_is_loose_match(const struct cls_rule *rule,
                        const struct minimatch *criteria)
{
    return (!minimask_has_extra(&rule->match.mask, &criteria->mask)
            && miniflow_equal_in_minimask(&rule->match.flow, &criteria->flow,
                                          &criteria->mask));
}

/* Iteration. */

static bool
rule_matches(const struct cls_match *rule, const struct cls_rule *target)
{
    return (!target
            || miniflow_equal_in_minimask(&rule->flow,
                                          &target->match.flow,
                                          &target->match.mask));
}

static struct cls_match *
search_subtable(const struct cls_subtable *subtable,
                struct cls_cursor *cursor)
{
    if (!cursor->target
        || !minimask_has_extra(&subtable->mask, &cursor->target->match.mask)) {
        struct cls_match *rule;

        CMAP_CURSOR_FOR_EACH (rule, cmap_node, &cursor->rules,
                              &subtable->rules) {
            if (rule_matches(rule, cursor->target)) {
                return rule;
            }
        }
    }
    return NULL;
}

/* Initializes 'cursor' for iterating through rules in 'cls', and returns the
 * first matching cls_rule via '*pnode', or NULL if there are no matches.
 *
 *     - If 'target' is null, the cursor will visit every rule in 'cls'.
 *
 *     - If 'target' is nonnull, the cursor will visit each 'rule' in 'cls'
 *       such that cls_rule_is_loose_match(rule, target) returns true.
 *
 * Ignores target->priority. */
struct cls_cursor cls_cursor_init(const struct classifier *cls,
                                  const struct cls_rule *target,
                                  void **pnode, const void *offset, bool safe)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct cls_cursor cursor;
    struct cls_subtable *subtable;
    struct cls_rule *cls_rule = NULL;

    cursor.safe = safe;
    cursor.cls = cls;
    cursor.target = target && !cls_rule_is_catchall(target) ? target : NULL;

    /* Find first rule. */
    ovs_mutex_lock(&cursor.cls->mutex);
    CMAP_CURSOR_FOR_EACH (subtable, cmap_node, &cursor.subtables,
                          &cursor.cls->subtables_map) {
        struct cls_match *rule = search_subtable(subtable, &cursor);

        if (rule) {
            cursor.subtable = subtable;
            cls_rule = rule->cls_rule;
            break;
        }
    }
    *pnode = (char *)cls_rule + (ptrdiff_t)offset;

    /* Keep the mutex locked only if we found a rule and are not in 'safe'
     * mode; 'safe' mode releases it between iterations. */
    if (safe || !cls_rule) {
        ovs_mutex_unlock(&cursor.cls->mutex);
    }
    return cursor;
}

static void
cls_cursor_next_unlock(struct cls_cursor *cursor, struct cls_rule *rule)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    /* Release the mutex if no rule, or 'safe' mode. */
    if (!rule || cursor->safe) {
        ovs_mutex_unlock(&cursor->cls->mutex);
    }
}

/* Returns the next matching cls_rule in 'cursor''s iteration, or a null
 * pointer if there are no more matches. */
struct cls_rule *
cls_cursor_next(struct cls_cursor *cursor, const struct cls_rule *rule_)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct cls_match *rule = CONST_CAST(struct cls_match *, rule_->cls_match);
    const struct cls_subtable *subtable;
    struct cls_match *next;

    /* Lock if not locked already. */
    if (cursor->safe) {
        ovs_mutex_lock(&cursor->cls->mutex);
    }

    next = next_rule_in_list__(rule);
    if (next->priority < rule->priority) {
        cls_cursor_next_unlock(cursor, next->cls_rule);
        return next->cls_rule;
    }

    /* 'next' is the head of the list, that is, the rule that is included in
     * the subtable's map.  (This is important when the classifier contains
     * rules that differ only in priority.) */
    rule = next;
    CMAP_CURSOR_FOR_EACH_CONTINUE (rule, cmap_node, &cursor->rules) {
        if (rule_matches(rule, cursor->target)) {
            cls_cursor_next_unlock(cursor, rule->cls_rule);
            return rule->cls_rule;
        }
    }

    subtable = cursor->subtable;
    CMAP_CURSOR_FOR_EACH_CONTINUE (subtable, cmap_node, &cursor->subtables) {
        rule = search_subtable(subtable, cursor);
        if (rule) {
            cursor->subtable = subtable;
            cls_cursor_next_unlock(cursor, rule->cls_rule);
            return rule->cls_rule;
        }
    }

    ovs_mutex_unlock(&cursor->cls->mutex);
    return NULL;
}

static struct cls_subtable *
find_subtable(const struct classifier *cls, const struct minimask *mask)
    OVS_REQUIRES(cls->mutex)
{
    struct cls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, minimask_hash(mask, 0),
                             &cls->subtables_map) {
        if (minimask_equal(mask, &subtable->mask)) {
            return subtable;
        }
    }
    return NULL;
}

/* The new subtable will be visible to the readers only after this. */
static struct cls_subtable *
insert_subtable(struct classifier *cls, const struct minimask *mask)
    OVS_REQUIRES(cls->mutex)
{
    uint32_t hash = minimask_hash(mask, 0);
    struct cls_subtable *subtable;
    int i, index = 0;
    struct flow_wildcards old, new;
    uint8_t prev;
    int count = count_1bits(mask->masks.map);

    subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
                       + MINIFLOW_VALUES_SIZE(count));
    cmap_init(&subtable->rules);
    miniflow_clone_inline(&subtable->mask.masks, &mask->masks, count);

    /* Init indices for segmented lookup, if any. */
    flow_wildcards_init_catchall(&new);
    old = new;
    prev = 0;
    for (i = 0; i < cls->n_flow_segments; i++) {
        flow_wildcards_fold_minimask_range(&new, mask, prev,
                                           cls->flow_segments[i]);
        /* Add an index if it adds mask bits. */
        if (!flow_wildcards_equal(&new, &old)) {
            cmap_init(&subtable->indices[index]);
            subtable->index_ofs[index] = cls->flow_segments[i];
            index++;
            old = new;
        }
        prev = cls->flow_segments[i];
    }
    /* Check if the rest of the subtable's mask adds any bits,
     * and remove the last index if it doesn't. */
    if (index > 0) {
        flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U32S);
        if (flow_wildcards_equal(&new, &old)) {
            --index;
            subtable->index_ofs[index] = 0;
            cmap_destroy(&subtable->indices[index]);
        }
    }
    subtable->n_indices = index;

    subtable->tag = (minimask_get_metadata_mask(mask) == OVS_BE64_MAX
                     ? tag_create_deterministic(hash)
                     : TAG_ALL);

    for (i = 0; i < cls->n_tries; i++) {
        subtable->trie_plen[i] = minimask_get_prefix_len(mask,
                                                         cls->tries[i].field);
    }

    /* Ports trie. */
    ovsrcu_set_hidden(&subtable->ports_trie, NULL);
    subtable->ports_mask_len
        = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));

    cmap_insert(&cls->subtables_map, &subtable->cmap_node, hash);

    return subtable;
}
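
/* Note on the index setup above: each configured flow segment boundary
 * becomes a staged-lookup index only if the subtable's mask gains at
 * least one new 1-bit within that segment; a segment that adds no mask
 * bits would hash identically to the previous stage, so it is skipped,
 * and a trailing index that already covers every masked bit is dropped
 * because the subtable's own 'rules' cmap subsumes it. */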

static void
destroy_subtable(struct classifier *cls, struct cls_subtable *subtable)
    OVS_REQUIRES(cls->mutex)
{
    int i;

    pvector_remove(&cls->subtables, subtable);
    trie_destroy(&subtable->ports_trie);

    for (i = 0; i < subtable->n_indices; i++) {
        cmap_destroy(&subtable->indices[i]);
    }
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                minimask_hash(&subtable->mask, 0));
    minimask_destroy(&subtable->mask);
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}

struct range {
    uint8_t start;
    uint8_t end;
};

static unsigned int be_get_bit_at(const ovs_be32 value[], unsigned int ofs);

/* Return 'true' if we can skip the rest of the subtable based on the prefix
 * trie lookup results. */
static inline bool
check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
            const unsigned int field_plen[CLS_MAX_TRIES],
            const struct range ofs, const struct flow *flow,
            struct flow_wildcards *wc)
{
    int j;

    /* Check if we could avoid fully unwildcarding the next level of
     * fields using the prefix tries.  The trie checks are done only as
     * needed to avoid folding in additional bits to the wildcards mask. */
    for (j = 0; j < n_tries; j++) {
        /* Is the trie field relevant for this subtable? */
        if (field_plen[j]) {
            struct trie_ctx *ctx = &trie_ctx[j];
            uint8_t be32ofs = ctx->be32ofs;

            /* Is the trie field within the current range of fields? */
            if (be32ofs >= ofs.start && be32ofs < ofs.end) {
                /* On-demand trie lookup. */
                if (!ctx->lookup_done) {
                    memset(&ctx->match_plens, 0, sizeof ctx->match_plens);
                    ctx->maskbits = trie_lookup(ctx->trie, flow,
                                                &ctx->match_plens);
                    ctx->lookup_done = true;
                }
                /* Possible to skip the rest of the subtable if subtable's
                 * prefix on the field is not included in the lookup result. */
                if (!be_get_bit_at(&ctx->match_plens.be32, field_plen[j] - 1)) {
                    /* We want the trie lookup to never result in unwildcarding
                     * any bits that would not be unwildcarded otherwise.
                     * Since the trie is shared by the whole classifier, it is
                     * possible that the 'maskbits' contain bits that are
                     * irrelevant for the partition relevant for the current
                     * packet.  Hence the checks below. */

                    /* Check that the trie result will not unwildcard more bits
                     * than this subtable would otherwise. */
                    if (ctx->maskbits <= field_plen[j]) {
                        /* Unwildcard the bits and skip the rest. */
                        mask_set_prefix_bits(wc, be32ofs, ctx->maskbits);
                        /* Note: Prerequisite already unwildcarded, as the only
                         * prerequisite of the supported trie lookup fields is
                         * the ethertype, which is always unwildcarded. */
                        return true;
                    }
                    /* Can skip if the field is already unwildcarded. */
                    if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) {
                        return true;
                    }
                }
            }
        }
    }
    return false;
}

/* Returns true if 'target' satisfies 'flow'/'mask', that is, if each bit
 * set in 'mask' has the same value in 'flow' and in 'target'.
 *
 * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
 * target, mask) but is faster because of the invariant that flow->map
 * and mask->masks.map are the same. */
static inline bool
miniflow_and_mask_matches_flow(const struct miniflow *flow,
                               const struct minimask *mask,
                               const struct flow *target)
{
    const uint32_t *flowp = miniflow_get_u32_values(flow);
    const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
    uint32_t idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        uint32_t diff = (*flowp++ ^ flow_u32_value(target, idx)) & *maskp++;

        if (diff) {
            return false;
        }
    }

    return true;
}
1503
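/* The masked compare above is the miniflow counterpart of the plain check
 * '(a ^ b) & mask == 0'.  For example (illustrative values): with a flow
 * word 0x0a000001, a target word 0x0a000002, and a mask word 0xffffff00,
 * diff == (0x0a000001 ^ 0x0a000002) & 0xffffff00 == 0, so the words agree
 * on all masked bits. */
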
static inline struct cls_match *
find_match(const struct cls_subtable *subtable, const struct flow *flow,
           uint32_t hash)
{
    struct cls_match *rule;

    CMAP_FOR_EACH_WITH_HASH (rule, cmap_node, hash, &subtable->rules) {
        if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
                                           flow)) {
            return rule;
        }
    }

    return NULL;
}

/* Returns true if 'target' satisfies 'flow'/'mask', that is, if 'target'
 * agrees with 'flow' on every bit for which 'mask' has a 1-bit.
 *
 * This function is equivalent to miniflow_and_mask_matches_flow() but this
 * version fills in the mask bits in 'wc'. */
static inline bool
miniflow_and_mask_matches_flow_wc(const struct miniflow *flow,
                                  const struct minimask *mask,
                                  const struct flow *target,
                                  struct flow_wildcards *wc)
{
    const uint32_t *flowp = miniflow_get_u32_values(flow);
    const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
    uint32_t idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        uint32_t mask = *maskp++;
        uint32_t diff = (*flowp++ ^ flow_u32_value(target, idx)) & mask;

        if (diff) {
            /* Only unwildcard if none of the differing bits is already
             * exact-matched. */
            if (!(flow_u32_value(&wc->masks, idx) & diff)) {
                /* Keep one bit of the difference. */
                *flow_u32_lvalue(&wc->masks, idx) |= rightmost_1bit(diff);
            }
            return false;
        }
        /* Fill in the bits that were looked at. */
        *flow_u32_lvalue(&wc->masks, idx) |= mask;
    }

    return true;
}

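/* For example (illustrative words): if a flow word is 0x000000ff, the
 * corresponding target word is 0x000000f0, and the mask word is 0x000000ff,
 * then diff == 0x0000000f.  If 'wc' does not already exact-match any of
 * those four bits, only rightmost_1bit(diff) == 0x00000001 is folded into
 * 'wc': one differing bit suffices to prove the mismatch, and unwildcarding
 * more would make the resulting megaflow needlessly narrow. */
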
/* Unwildcard the fields looked up so far, if any. */
static void
fill_range_wc(const struct cls_subtable *subtable, struct flow_wildcards *wc,
              uint8_t to)
{
    if (to) {
        flow_wildcards_fold_minimask_range(wc, &subtable->mask, 0, to);
    }
}

static struct cls_match *
find_match_wc(const struct cls_subtable *subtable, const struct flow *flow,
              struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
              struct flow_wildcards *wc)
{
    uint32_t basis = 0, hash;
    struct cls_match *rule;
    int i;
    struct range ofs;

    if (OVS_UNLIKELY(!wc)) {
        return find_match(subtable, flow,
                          flow_hash_in_minimask(flow, &subtable->mask, 0));
    }

    ofs.start = 0;
    /* Try to finish early by checking fields in segments. */
    for (i = 0; i < subtable->n_indices; i++) {
        struct cmap_node *inode;

        ofs.end = subtable->index_ofs[i];

        if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow,
                        wc)) {
            /* 'wc' bits for the trie field set, now unwildcard the preceding
             * bits used so far. */
            fill_range_wc(subtable, wc, ofs.start);
            return NULL;
        }
        hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
                                           ofs.end, &basis);
        inode = cmap_find(&subtable->indices[i], hash);
        if (!inode) {
            /* No match, can stop immediately, but must fold in the bits
             * used in lookup so far. */
            fill_range_wc(subtable, wc, ofs.end);
            return NULL;
        }

        /* If we have narrowed down to a single rule already, check whether
         * that rule matches.  Either way, we're done.
         *
         * (Rare) hash collisions may cause us to miss the opportunity for this
         * optimization. */
        if (!cmap_node_next(inode)) {
            ASSIGN_CONTAINER(rule, inode - i, index_nodes);
            if (miniflow_and_mask_matches_flow_wc(&rule->flow, &subtable->mask,
                                                  flow, wc)) {
                return rule;
            }
            return NULL;
        }
        ofs.start = ofs.end;
    }
    ofs.end = FLOW_U32S;
    /* Trie check for the final range. */
    if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow, wc)) {
        fill_range_wc(subtable, wc, ofs.start);
        return NULL;
    }
    hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
                                       ofs.end, &basis);
    rule = find_match(subtable, flow, hash);
    if (!rule && subtable->ports_mask_len) {
        /* Ports are always part of the final range, if any.
         * No match was found for the ports.  Use the ports trie to figure out
         * which ports bits to unwildcard. */
        unsigned int mbits;
        ovs_be32 value, plens, mask;

        mask = MINIFLOW_GET_BE32(&subtable->mask.masks, tp_src);
        value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
        mbits = trie_lookup_value(&subtable->ports_trie, &value, &plens, 32);

        ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
            mask & htonl(~0 << (32 - mbits));

        /* Unwildcard all bits in the mask up to the ports, as they were used
         * to determine there is no match. */
        fill_range_wc(subtable, wc, TP_PORTS_OFS32);
        return NULL;
    }

    /* Must unwildcard all the fields, as they were looked at. */
    flow_wildcards_fold_minimask(wc, &subtable->mask);
    return rule;
}

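/* A note on the ports unwildcarding above (illustrative numbers): if the
 * ports trie lookup reports mbits == 10, only 'mask & htonl(~0 << 22)' is
 * folded into 'wc', i.e. at most the top 10 bits of the 32-bit word holding
 * tp_src and tp_dst, since examining those bits was already enough to prove
 * that no rule in this subtable can match the packet's ports. */
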
static struct cls_match *
find_equal(struct cls_subtable *subtable, const struct miniflow *flow,
           uint32_t hash)
{
    struct cls_match *head;

    CMAP_FOR_EACH_WITH_HASH (head, cmap_node, hash, &subtable->rules) {
        if (miniflow_equal(&head->flow, flow)) {
            return head;
        }
    }
    return NULL;
}

/*
 * As the readers are operating concurrently with the modifications, a
 * concurrent reader may or may not see the new rule, depending on how
 * the concurrent events overlap with each other.  This is no
 * different from the former locked behavior, but there the visibility
 * of the new rule only depended on the timing of the locking
 * functions.
 *
 * The new rule is first added to the segment indices, so the readers
 * may find the rule in the indices before the rule is visible in the
 * subtable's 'rules' map.  This may result in us losing the
 * opportunity to quit lookups earlier, resulting in sub-optimal
 * wildcarding.  This will be fixed by forthcoming revalidation always
 * scheduled after flow table changes.
 *
 * Similar behavior may happen due to us removing the overlapping rule
 * (if any) from the indices only after the new rule has been added.
 *
 * The subtable's max priority is updated only after the rule is
 * inserted, so the concurrent readers may not see the rule, as the
 * updated priority ordered subtable list will only be visible after
 * the subtable's max priority is updated.
 *
 * Similarly, the classifier's partitions for new rules are updated by
 * the caller after this function, so the readers may keep skipping
 * the subtable until they see the updated partitions.
 */
static struct cls_match *
insert_rule(struct classifier *cls, struct cls_subtable *subtable,
            struct cls_rule *new_rule)
    OVS_REQUIRES(cls->mutex)
{
    struct cls_match *old = NULL;
    struct cls_match *new = cls_match_alloc(new_rule);
    struct cls_match *head;
    int i;
    uint32_t basis = 0, hash, ihash[CLS_MAX_INDICES];
    uint8_t prev_be32ofs = 0;

    /* Add new node to segment indices. */
    for (i = 0; i < subtable->n_indices; i++) {
        ihash[i] = minimatch_hash_range(&new_rule->match, prev_be32ofs,
                                        subtable->index_ofs[i], &basis);
        cmap_insert(&subtable->indices[i], &new->index_nodes[i], ihash[i]);
        prev_be32ofs = subtable->index_ofs[i];
    }
    hash = minimatch_hash_range(&new_rule->match, prev_be32ofs, FLOW_U32S,
                                &basis);
    head = find_equal(subtable, &new_rule->match.flow, hash);
    if (!head) {
        cmap_insert(&subtable->rules, &new->cmap_node, hash);
        list_init(&new->list);
        goto out;
    } else {
        /* Scan the list for the insertion point that will keep the list in
         * order of decreasing priority. */
        struct cls_match *rule;

        FOR_EACH_RULE_IN_LIST (rule, head) {
            if (new->priority >= rule->priority) {
                if (rule == head) {
                    /* 'new' is the new highest-priority flow in the list. */
                    cmap_replace(&subtable->rules, &rule->cmap_node,
                                 &new->cmap_node, hash);
                }

                if (new->priority == rule->priority) {
                    list_replace(&new->list, &rule->list);
                    old = rule;
                } else {
                    list_insert(&rule->list, &new->list);
                }
                goto out;
            }
        }

        /* Insert 'new' at the end of the list. */
        list_push_back(&head->list, &new->list);
    }

 out:
    if (!old) {
        subtable->n_rules++;

        /* Rule was added, not replaced.  Update 'subtable's 'max_priority'
         * and 'max_count', if necessary. */
        if (subtable->n_rules == 1) {
            subtable->max_priority = new->priority;
            subtable->max_count = 1;
            pvector_insert(&cls->subtables, subtable, new->priority);
        } else if (subtable->max_priority == new->priority) {
            ++subtable->max_count;
        } else if (new->priority > subtable->max_priority) {
            subtable->max_priority = new->priority;
            subtable->max_count = 1;
            pvector_change_priority(&cls->subtables, subtable, new->priority);
        }
    } else {
        /* Remove old node from indices. */
        for (i = 0; i < subtable->n_indices; i++) {
            cmap_remove(&subtable->indices[i], &old->index_nodes[i], ihash[i]);
        }
    }
    return old;
}

static struct cls_match *
next_rule_in_list__(struct cls_match *rule)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct cls_match *next = OBJECT_CONTAINING(rule->list.next, next, list);
    return next;
}

static struct cls_match *
next_rule_in_list(struct cls_match *rule)
{
    struct cls_match *next = next_rule_in_list__(rule);
    return next->priority < rule->priority ? next : NULL;
}
\f
/* A longest-prefix match tree. */
struct trie_node {
    uint32_t prefix;       /* Prefix bits for this node, MSB first. */
    uint8_t  n_bits;       /* Never zero, except for the root node. */
    unsigned int n_rules;  /* Number of rules that have this prefix. */
    rcu_trie_ptr edges[2]; /* Both NULL if leaf. */
};

/* Max bits per node.  Must fit in struct trie_node's 'prefix'.
 * Also tested with 16, 8, and 5 to stress the implementation. */
#define TRIE_PREFIX_BITS 32

/* Return at least 'plen' bits of the 'prefix', starting at bit offset 'ofs'.
 * Prefixes are in the network byte order, and the offset 0 corresponds to
 * the most significant bit of the first byte.  The offset can be read as
 * "how many bits to skip from the start of the prefix starting at 'pr'". */
static uint32_t
raw_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    uint32_t prefix;

    pr += ofs / 32; /* Where to start. */
    ofs %= 32; /* How many bits to skip at 'pr'. */

    prefix = ntohl(*pr) << ofs; /* Get the first 32 - ofs bits. */
    if (plen > 32 - ofs) { /* Need more than we have already? */
        prefix |= ntohl(*++pr) >> (32 - ofs);
    }
    /* Return with possible unwanted bits at the end. */
    return prefix;
}

/* Return min(TRIE_PREFIX_BITS, plen) bits of the 'prefix', starting at bit
 * offset 'ofs'.  Prefixes are in the network byte order, and the offset 0
 * corresponds to the most significant bit of the first byte.  The offset can
 * be read as "how many bits to skip from the start of the prefix starting at
 * 'pr'". */
static uint32_t
trie_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    if (!plen) {
        return 0;
    }
    if (plen > TRIE_PREFIX_BITS) {
        plen = TRIE_PREFIX_BITS; /* Get at most TRIE_PREFIX_BITS. */
    }
    /* Return with unwanted bits cleared. */
    return raw_get_prefix(pr, ofs, plen) & ~0u << (32 - plen);
}
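
/* A worked example for the two accessors above (illustrative values): with
 * pr[] = { htonl(0x0a0b0c0d), htonl(0x11223344) }, i.e. the byte sequence
 * 0a 0b 0c 0d 11 22 33 44,
 *
 *     raw_get_prefix(pr, 24, 16)  == 0x0d112233
 *
 * skips the first 24 bits (0a 0b 0c), returns the next 16 bits (0d 11) in
 * the high half, and leaves the following bits (22 33) as unwanted trailing
 * bits, whereas
 *
 *     trie_get_prefix(pr, 24, 16) == 0x0d110000
 *
 * clears those trailing bits with the '& ~0u << (32 - plen)' mask. */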

/* Return the number of bits in which the 'n_bits' MSBs of 'prefix' equal the
 * bits of 'value' starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
prefix_equal_bits(uint32_t prefix, unsigned int n_bits, const ovs_be32 value[],
                  unsigned int ofs)
{
    uint64_t diff = prefix ^ raw_get_prefix(value, ofs, n_bits);
    /* Set the bit after the relevant bits to limit the result. */
    return raw_clz64(diff << 32 | UINT64_C(1) << (63 - n_bits));
}
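
/* For example (illustrative values): with prefix == 0xab000000 and
 * n_bits == 8, if the 8 bits of 'value' at 'ofs' are 0xac, then
 *
 *     diff == 0xab000000 ^ 0xac000000 == 0x07000000
 *
 * and raw_clz64(0x0780000000000000) == 5: 0xab (10101011) and 0xac
 * (10101100) share their first 5 bits.  The sentinel bit ORed in at
 * position 63 - n_bits caps the result at n_bits when diff == 0. */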

/* Return the number of equal bits between the prefix of 'node' and a
 * 'prefix' of length 'plen', starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
trie_prefix_equal_bits(const struct trie_node *node, const ovs_be32 prefix[],
                       unsigned int ofs, unsigned int plen)
{
    return prefix_equal_bits(node->prefix, MIN(node->n_bits, plen - ofs),
                             prefix, ofs);
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' can
 * be greater than 31. */
static unsigned int
be_get_bit_at(const ovs_be32 value[], unsigned int ofs)
{
    return (((const uint8_t *)value)[ofs / 8] >> (7 - ofs % 8)) & 1u;
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' must
 * be between 0 and 31, inclusive. */
static unsigned int
get_bit_at(const uint32_t prefix, unsigned int ofs)
{
    return (prefix >> (31 - ofs)) & 1u;
}
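
/* For instance (illustrative value), with value[] = { htonl(0x80000001) },
 * be_get_bit_at(value, 0) == 1 and be_get_bit_at(value, 31) == 1, while
 * every offset in between yields 0.  get_bit_at(0x80000001, 0) == 1 reads
 * the same "MSB 0" bit, but only within a single 32-bit word. */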

/* Create new branch. */
static struct trie_node *
trie_branch_create(const ovs_be32 *prefix, unsigned int ofs, unsigned int plen,
                   unsigned int n_rules)
{
    struct trie_node *node = xmalloc(sizeof *node);

    node->prefix = trie_get_prefix(prefix, ofs, plen);

    if (plen <= TRIE_PREFIX_BITS) {
        node->n_bits = plen;
        ovsrcu_set_hidden(&node->edges[0], NULL);
        ovsrcu_set_hidden(&node->edges[1], NULL);
        node->n_rules = n_rules;
    } else { /* Need intermediate nodes. */
        struct trie_node *subnode = trie_branch_create(prefix,
                                                       ofs + TRIE_PREFIX_BITS,
                                                       plen - TRIE_PREFIX_BITS,
                                                       n_rules);
        int bit = get_bit_at(subnode->prefix, 0);
        node->n_bits = TRIE_PREFIX_BITS;
        ovsrcu_set_hidden(&node->edges[bit], subnode);
        ovsrcu_set_hidden(&node->edges[!bit], NULL);
        node->n_rules = 0;
    }
    return node;
}
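
/* A sketch of the recursion above (hypothetical prefix): a branch for a
 * 48-bit prefix such as 2001:db8:1234::/48 becomes a chain of two nodes,
 * since TRIE_PREFIX_BITS is 32: the first node holds bits 0..31 (n_bits ==
 * 32, n_rules == 0) and its single child holds the remaining 16 bits and
 * carries the rule count. */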

static void
trie_node_destroy(const struct trie_node *node)
{
    ovsrcu_postpone(free, CONST_CAST(struct trie_node *, node));
}

/* Copy a trie node for modification and postpone deletion of the old one. */
static struct trie_node *
trie_node_rcu_realloc(const struct trie_node *node)
{
    struct trie_node *new_node = xmalloc(sizeof *node);

    *new_node = *node;
    trie_node_destroy(node);

    return new_node;
}

/* May only be called while holding the classifier mutex. */
static void
trie_destroy(rcu_trie_ptr *trie)
{
    struct trie_node *node = ovsrcu_get_protected(struct trie_node *, trie);

    if (node) {
        ovsrcu_set_hidden(trie, NULL);
        trie_destroy(&node->edges[0]);
        trie_destroy(&node->edges[1]);
        trie_node_destroy(node);
    }
}

static bool
trie_is_leaf(const struct trie_node *trie)
{
    /* No children? */
    return !ovsrcu_get(struct trie_node *, &trie->edges[0])
        && !ovsrcu_get(struct trie_node *, &trie->edges[1]);
}

static void
mask_set_prefix_bits(struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;

    for (i = 0; i < n_bits / 32; i++) {
        mask[i] = OVS_BE32_MAX;
    }
    if (n_bits % 32) {
        mask[i] |= htonl(~0u << (32 - n_bits % 32));
    }
}

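/* For example (illustrative count): n_bits == 38 fully sets mask[0] and
 * then ORs mask[1] with htonl(~0u << 26) == htonl(0xfc000000), i.e. the
 * first 6 bits of the second word, for 32 + 6 == 38 mask bits in total. */
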
static bool
mask_prefix_bits_set(const struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;
    ovs_be32 zeroes = 0;

    for (i = 0; i < n_bits / 32; i++) {
        zeroes |= ~mask[i];
    }
    if (n_bits % 32) {
        zeroes |= ~mask[i] & htonl(~0u << (32 - n_bits % 32));
    }

    return !zeroes; /* All 'n_bits' bits set. */
}

static rcu_trie_ptr *
trie_next_edge(struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return node->edges + be_get_bit_at(value, ofs);
}

static const struct trie_node *
trie_next_node(const struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return ovsrcu_get(struct trie_node *,
                      &node->edges[be_get_bit_at(value, ofs)]);
}

/* Set the bit at ("MSB 0"-based) offset 'ofs'.  'ofs' can be greater than 31.
 */
static void
be_set_bit_at(ovs_be32 value[], unsigned int ofs)
{
    ((uint8_t *)value)[ofs / 8] |= 1u << (7 - ofs % 8);
}

/* Returns the number of bits in the prefix mask necessary to determine a
 * mismatch, in case there are longer prefixes in the tree below the one that
 * matched.
 * '*plens' will have a bit set for each prefix length that may have matching
 * rules.  The caller is responsible for clearing the '*plens' prior to
 * calling this.
 */
static unsigned int
trie_lookup_value(const rcu_trie_ptr *trie, const ovs_be32 value[],
                  ovs_be32 plens[], unsigned int n_bits)
{
    const struct trie_node *prev = NULL;
    const struct trie_node *node = ovsrcu_get(struct trie_node *, trie);
    unsigned int match_len = 0; /* Number of matching bits. */

    for (; node; prev = node, node = trie_next_node(node, value, match_len)) {
        unsigned int eqbits;
        /* Check if this edge can be followed. */
        eqbits = prefix_equal_bits(node->prefix, node->n_bits, value,
                                   match_len);
        match_len += eqbits;
        if (eqbits < node->n_bits) { /* Mismatch, nothing more to be found. */
            /* Bit at offset 'match_len' differed. */
            return match_len + 1; /* Includes the first mismatching bit. */
        }
        /* Full match, check if rules exist at this prefix length. */
        if (node->n_rules > 0) {
            be_set_bit_at(plens, match_len - 1);
        }
        if (match_len >= n_bits) {
            return n_bits; /* Full prefix. */
        }
    }
    /* node == NULL.  Full match so far, but we tried to follow a
     * non-existing branch.  Need to exclude the other branch if it exists
     * (it does not if we were called on an empty trie or 'prev' is a leaf
     * node). */
    return !prev || trie_is_leaf(prev) ? match_len : match_len + 1;
}

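/* A worked example (hypothetical contents): with 10.0.0.0/8 and 10.1.0.0/16
 * in the trie, looking up the value 10.2.0.0 first fully matches the /8 node
 * (setting the plen-8 bit in 'plens'), then diverges from the node holding
 * the next 8 bits after 6 more equal bits (0x01 vs. 0x02), returning
 * 8 + 6 + 1 == 15: the number of prefix bits that had to be examined to rule
 * out any longer match. */
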
static unsigned int
trie_lookup(const struct cls_trie *trie, const struct flow *flow,
            union mf_value *plens)
{
    const struct mf_field *mf = trie->field;

    /* Check that current flow matches the prerequisites for the trie
     * field.  Some match fields are used for multiple purposes, so we
     * must check that the trie is relevant for this flow. */
    if (mf_are_prereqs_ok(mf, flow)) {
        return trie_lookup_value(&trie->root,
                                 &((ovs_be32 *)flow)[mf->flow_be32ofs],
                                 &plens->be32, mf->n_bits);
    }
    memset(plens, 0xff, sizeof *plens); /* All prefixes, no skipping. */
    return 0; /* Value not used in this case. */
}

/* Returns the length of the prefix match mask for the field 'mf' in
 * 'minimask', or 0 if the mask is not a contiguous CIDR prefix. */
static unsigned int
minimask_get_prefix_len(const struct minimask *minimask,
                        const struct mf_field *mf)
{
    unsigned int n_bits = 0, mask_tz = 0; /* Non-zero when end of mask seen. */
    uint8_t u32_ofs = mf->flow_be32ofs;
    uint8_t u32_end = u32_ofs + mf->n_bytes / 4;

    for (; u32_ofs < u32_end; ++u32_ofs) {
        uint32_t mask;
        mask = ntohl((OVS_FORCE ovs_be32)minimask_get(minimask, u32_ofs));

        /* Validate mask, count the mask length. */
        if (mask_tz) {
            if (mask) {
                return 0; /* No bits allowed after mask ended. */
            }
        } else {
            if (~mask & (~mask + 1)) {
                return 0; /* Mask not contiguous. */
            }
            mask_tz = ctz32(mask);
            n_bits += 32 - mask_tz;
        }
    }

    return n_bits;
}

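/* The contiguity test above relies on two's complement arithmetic: for a
 * CIDR mask such as 0xffffff00, ~mask == 0x000000ff and ~mask + 1 == 0x100,
 * so their AND is zero, and the counted length is 32 - ctz32(0xffffff00) ==
 * 24 bits.  For a non-contiguous mask such as 0xff00ff00 the AND is
 * non-zero and the function returns 0. */
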
/*
 * This is called only when mask prefix is known to be CIDR and non-zero.
 * Relies on the fact that the flow and mask have the same map, and since
 * the mask is CIDR, the storage for the flow field exists even if it
 * happened to be zeros.
 */
static const ovs_be32 *
minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
{
    return miniflow_get_be32_values(&match->flow) +
        count_1bits(match->flow.map & ((UINT64_C(1) << mf->flow_be32ofs) - 1));
}

/* Insert rule into the prefix tree.
 * 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_insert_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

static void
trie_insert_prefix(rcu_trie_ptr *edge, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    int ofs = 0;

    /* Walk the tree. */
    for (; (node = ovsrcu_get_protected(struct trie_node *, edge));
         edge = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
        ofs += eqbits;
        if (eqbits < node->n_bits) {
            /* Mismatch, new node needs to be inserted above. */
            int old_branch = get_bit_at(node->prefix, eqbits);
            struct trie_node *new_parent;

            new_parent = trie_branch_create(prefix, ofs - eqbits, eqbits,
                                            ofs == mlen ? 1 : 0);
            /* Copy the node to modify it. */
            node = trie_node_rcu_realloc(node);
            /* Adjust the new node for its new position in the tree. */
            node->prefix <<= eqbits;
            node->n_bits -= eqbits;
            ovsrcu_set_hidden(&new_parent->edges[old_branch], node);

            /* Check if a new branch is needed for the new rule. */
            if (ofs < mlen) {
                ovsrcu_set_hidden(&new_parent->edges[!old_branch],
                                  trie_branch_create(prefix, ofs, mlen - ofs,
                                                     1));
            }
            ovsrcu_set(edge, new_parent); /* Publish changes. */
            return;
        }
        /* Full match so far. */

        if (ofs == mlen) {
            /* Full match at the current node, rule needs to be added here. */
            node->n_rules++;
            return;
        }
    }
    /* Must insert a new tree branch for the new rule. */
    ovsrcu_set(edge, trie_branch_create(prefix, ofs, mlen - ofs, 1));
}

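/* A sketch of the split above (hypothetical prefixes): if the trie holds
 * only 10.1.0.0/16 and 10.2.0.0/16 is inserted, the walk stops inside the
 * existing node after the 14 bits common to 10.1 and 10.2.  A new 14-bit
 * parent is created, the old node is shortened to its remaining 2 bits and
 * hung off one edge, a 2-bit branch for the new prefix goes on the other
 * edge, and the whole change is published with a single ovsrcu_set() on the
 * parent's incoming edge. */
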
/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_remove_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove_prefix(rcu_trie_ptr *root, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    rcu_trie_ptr *edges[sizeof(union mf_value) * 8];
    int depth = 0, ofs = 0;

    /* Walk the tree. */
    for (edges[0] = root;
         (node = ovsrcu_get_protected(struct trie_node *, edges[depth]));
         edges[++depth] = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);

        if (eqbits < node->n_bits) {
            /* Mismatch, nothing to be removed.  This should never happen, as
             * only rules in the classifier are ever removed. */
            break; /* Log a warning. */
        }
        /* Full match so far. */
        ofs += eqbits;

        if (ofs == mlen) {
            /* Full prefix match at the current node, remove rule here. */
            if (!node->n_rules) {
                break; /* Log a warning. */
            }
            node->n_rules--;

            /* Check if the tree can be pruned. */
            while (!node->n_rules) {
                struct trie_node *next,
                    *edge0 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[0]),
                    *edge1 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[1]);

                if (edge0 && edge1) {
                    break; /* A branching point, cannot prune. */
                }

                /* Otherwise there is at most one child node; remove this
                 * node. */
                next = edge0 ? edge0 : edge1;

                if (next) {
                    if (node->n_bits + next->n_bits > TRIE_PREFIX_BITS) {
                        break; /* Cannot combine. */
                    }
                    next = trie_node_rcu_realloc(next); /* Modify. */

                    /* Combine node with next. */
                    next->prefix = node->prefix | next->prefix >> node->n_bits;
                    next->n_bits += node->n_bits;
                }
                /* Update the parent's edge. */
                ovsrcu_set(edges[depth], next); /* Publish changes. */
                trie_node_destroy(node);

                if (next || !depth) {
                    /* Branch not pruned or at root, nothing more to do. */
                    break;
                }
                node = ovsrcu_get_protected(struct trie_node *,
                                            edges[--depth]);
            }
            return;
        }
    }
    /* Cannot go deeper.  This should never happen, since only rules
     * that actually exist in the classifier are ever removed. */
    VLOG_WARN("Trying to remove a non-existing rule from a prefix trie.");
}
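
/* An illustration of the combine step above (hypothetical values): pruning
 * a rule-less parent with prefix 0xaa000000 (n_bits == 8) whose only child
 * holds 0xbb000000 (n_bits == 8) yields a single node with prefix
 * 0xaa000000 | 0xbb000000 >> 8 == 0xaabb0000 and n_bits == 16, provided the
 * combined length does not exceed TRIE_PREFIX_BITS. */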