]> git.proxmox.com Git - mirror_ovs.git/blame - lib/classifier.c
xenserver: Add license to uuid.py.
[mirror_ovs.git] / lib / classifier.c
CommitLineData
064af421 1/*
63e60b86 2 * Copyright (c) 2009, 2010 Nicira Networks.
064af421 3 *
a14bc59f
BP
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
064af421 7 *
a14bc59f
BP
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
064af421
BP
15 */
16
17#include <config.h>
18#include "classifier.h"
19#include <assert.h>
20#include <errno.h>
21#include <netinet/in.h>
68d1c8c3 22#include "dynamic-string.h"
064af421
BP
23#include "flow.h"
24#include "hash.h"
25
/* Descriptor for each classifier field, indexed by CLS_F_IDX_*.  Each entry
 * records the field's byte offset and size within flow_t, the OFPFW_*
 * wildcard bit(s) that correspond to it, and a human-readable name.  The
 * trailing sentinel entry stands for an exact-match rule (no wildcards). */
const struct cls_field cls_fields[CLS_N_FIELDS + 1] = {
#define CLS_FIELD(WILDCARDS, MEMBER, NAME)      \
    { offsetof(flow_t, MEMBER),                 \
      sizeof ((flow_t *)0)->MEMBER,             \
      WILDCARDS,                                \
      #NAME },
    CLS_FIELDS
#undef CLS_FIELD
    { sizeof(flow_t), 0, 0, "exact" },
};
36
37static uint32_t hash_fields(const flow_t *, int table_idx);
38static bool equal_fields(const flow_t *, const flow_t *, int table_idx);
39
40static int table_idx_from_wildcards(uint32_t wildcards);
41static struct cls_rule *table_insert(struct hmap *, struct cls_rule *);
42static struct cls_rule *insert_exact_rule(struct classifier *,
43 struct cls_rule *);
44static struct cls_bucket *find_bucket(struct hmap *, size_t hash,
45 const struct cls_rule *);
46static struct cls_rule *search_table(const struct hmap *table, int field_idx,
47 const struct cls_rule *);
48static struct cls_rule *search_exact_table(const struct classifier *,
49 size_t hash, const flow_t *);
50static bool rules_match_1wild(const struct cls_rule *fixed,
51 const struct cls_rule *wild, int field_idx);
49bdc010
JP
52static bool rules_match_2wild(const struct cls_rule *wild1,
53 const struct cls_rule *wild2, int field_idx);
064af421
BP
54
/* Converts the flow in 'flow' into a cls_rule in 'rule', with the given
 * 'wildcards' and 'priority'.  Also computes and caches the rule's table
 * index from the normalized wildcard bits. */
void
cls_rule_from_flow(const flow_t *flow, uint32_t wildcards,
                   unsigned int priority, struct cls_rule *rule)
{
    /* The 'reserved' bytes must be zero; nonzero values indicate a caller
     * passing in an improperly initialized flow. */
    assert(!flow->reserved[0] && !flow->reserved[1] && !flow->reserved[2]);
    rule->flow = *flow;
    flow_wildcards_init(&rule->wc, wildcards);
    rule->priority = priority;
    rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards);
}
67
/* Converts the ofp_match in 'match' into a cls_rule in 'rule', with the given
 * 'priority'.  If 'tun_id_from_cookie' is set then the upper 32 bits of
 * 'cookie' are stored in the rule as the tunnel ID. */
void
cls_rule_from_match(const struct ofp_match *match, unsigned int priority,
                    bool tun_id_from_cookie, uint64_t cookie,
                    struct cls_rule *rule)
{
    uint32_t wildcards;
    flow_from_match(match, tun_id_from_cookie, cookie, &rule->flow, &wildcards);
    flow_wildcards_init(&rule->wc, wildcards);
    /* Exact-match rules always get the maximum priority, since at most one
     * exact rule can match any given flow. */
    rule->priority = rule->wc.wildcards ? priority : UINT16_MAX;
    rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards);
}
82
68d1c8c3
BP
83/* Converts 'rule' to a string and returns the string. The caller must free
84 * the string (with free()). */
85char *
86cls_rule_to_string(const struct cls_rule *rule)
87{
88 struct ds s = DS_EMPTY_INITIALIZER;
89 ds_put_format(&s, "wildcards=%x priority=%u ",
90 rule->wc.wildcards, rule->priority);
91 flow_format(&s, &rule->flow);
92 return ds_cstr(&s);
93}
94
064af421
BP
95/* Prints cls_rule 'rule', for debugging.
96 *
97 * (The output could be improved and expanded, but this was good enough to
98 * debug the classifier.) */
99void
100cls_rule_print(const struct cls_rule *rule)
101{
102 printf("wildcards=%x priority=%u ", rule->wc.wildcards, rule->priority);
103 flow_print(stdout, &rule->flow);
104 putc('\n', stdout);
105}
106
/* Adjusts pointers around 'old', which must be in classifier 'cls', to
 * compensate for it having been moved in memory to 'new' (e.g. due to
 * realloc()).
 *
 * This function cannot be realized in all possible flow classifier
 * implementations, so we will probably have to change the interface if we
 * change the implementation.  Shouldn't be a big deal though. */
void
cls_rule_moved(struct classifier *cls, struct cls_rule *old,
               struct cls_rule *new)
{
    if (old != new) {
        if (new->wc.wildcards) {
            /* Wildcarded rules live on a bucket's linked list. */
            list_moved(&new->node.list);
        } else {
            /* Exact rules live in the exact-match hash table. */
            hmap_node_moved(&cls->exact_table,
                            &old->node.hmap, &new->node.hmap);
        }
    }
}
127
/* Replaces 'old', which must be in classifier 'cls', by 'new' (e.g. due to
 * realloc()); that is, after calling this function 'new' will be in 'cls' in
 * place of 'old'.
 *
 * 'new' and 'old' must be exactly the same: wildcard the same fields, have the
 * same fixed values for non-wildcarded fields, and have the same priority.
 *
 * The caller takes ownership of 'old' and is thus responsible for freeing it,
 * etc., as necessary.
 *
 * This function cannot be realized in all possible flow classifier
 * implementations, so we will probably have to change the interface if we
 * change the implementation.  Shouldn't be a big deal though. */
void
cls_rule_replace(struct classifier *cls, const struct cls_rule *old,
                 struct cls_rule *new)
{
    assert(old != new);
    assert(old->wc.wildcards == new->wc.wildcards);
    assert(old->priority == new->priority);

    if (new->wc.wildcards) {
        list_replace(&new->node.list, &old->node.list);
    } else {
        hmap_replace(&cls->exact_table, &old->node.hmap, &new->node.hmap);
    }
}
155\f
156/* Initializes 'cls' as a classifier that initially contains no classification
157 * rules. */
158void
159classifier_init(struct classifier *cls)
160{
161 int i;
162
163 cls->n_rules = 0;
164 for (i = 0; i < ARRAY_SIZE(cls->tables); i++) {
165 hmap_init(&cls->tables[i]);
166 }
167 hmap_init(&cls->exact_table);
168}
169
/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.  (Buckets, which are internal to the classifier,
 * are freed here.) */
void
classifier_destroy(struct classifier *cls)
{
    if (cls) {
        struct cls_bucket *bucket, *next_bucket;
        struct hmap *tbl;

        for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
            /* _SAFE variant: we free each bucket while iterating. */
            HMAP_FOR_EACH_SAFE (bucket, next_bucket,
                                struct cls_bucket, hmap_node, tbl) {
                free(bucket);
            }
            hmap_destroy(tbl);
        }
        hmap_destroy(&cls->exact_table);
    }
}
189
190/* Returns true if 'cls' does not contain any classification rules, false
191 * otherwise. */
192bool
193classifier_is_empty(const struct classifier *cls)
194{
195 return cls->n_rules == 0;
196}
197
/* Returns the total number of rules in 'cls' (wildcarded plus exact). */
int
classifier_count(const struct classifier *cls)
{
    return cls->n_rules;
}
204
/* Returns the number of rules in 'cls' that have no wildcards. */
int
classifier_count_exact(const struct classifier *cls)
{
    return hmap_count(&cls->exact_table);
}
211
/* Inserts 'rule' into 'cls'.  Transfers ownership of 'rule' to 'cls'.
 *
 * If 'cls' already contains an identical rule (including wildcards, values of
 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
 * rule that was replaced.  The caller takes ownership of the returned rule and
 * is thus responsible for freeing it, etc., as necessary.
 *
 * Returns NULL if 'cls' does not contain a rule with an identical key, after
 * inserting the new rule.  In this case, no rules are displaced by the new
 * rule, even rules that cannot have any effect because the new rule matches a
 * superset of their flows and has higher priority. */
struct cls_rule *
classifier_insert(struct classifier *cls, struct cls_rule *rule)
{
    struct cls_rule *old;
    /* table_idx must agree with the wildcard bits: exact rules go in the
     * exact table, wildcarded rules in one of the per-field tables. */
    assert((rule->wc.wildcards == 0) == (rule->table_idx == CLS_F_IDX_EXACT));
    old = (rule->wc.wildcards
           ? table_insert(&cls->tables[rule->table_idx], rule)
           : insert_exact_rule(cls, rule));
    if (!old) {
        cls->n_rules++;
    }
    return old;
}
236
/* Inserts 'rule' into 'cls'.  Transfers ownership of 'rule' to 'cls'.
 *
 * 'rule' must be an exact-match rule (rule->wc.wildcards must be 0) and 'cls'
 * must not contain any rule with an identical key; unlike
 * classifier_insert(), no duplicate check is performed here. */
void
classifier_insert_exact(struct classifier *cls, struct cls_rule *rule)
{
    hmap_insert(&cls->exact_table, &rule->node.hmap,
                flow_hash(&rule->flow, 0));
    cls->n_rules++;
}
248
/* Removes 'rule' from 'cls'.  It is caller's responsibility to free 'rule', if
 * this is desirable. */
void
classifier_remove(struct classifier *cls, struct cls_rule *rule)
{
    if (rule->wc.wildcards) {
        /* Remove 'rule' from bucket.  If that empties the bucket, remove the
         * bucket from its table. */
        struct hmap *table = &cls->tables[rule->table_idx];
        struct list *rules = list_remove(&rule->node.list);
        if (list_is_empty(rules)) {
            /* This code is a little tricky.  list_remove() returns the list
             * element just after the one removed.  Since the list is now
             * empty, this will be the address of the 'rules' member of the
             * bucket that was just emptied, so pointer arithmetic (via
             * CONTAINER_OF) can find that bucket. */
            struct cls_bucket *bucket;
            bucket = CONTAINER_OF(rules, struct cls_bucket, rules);
            hmap_remove(table, &bucket->hmap_node);
            free(bucket);
        }
    } else {
        /* Remove 'rule' from cls->exact_table. */
        hmap_remove(&cls->exact_table, &rule->node.hmap);
    }
    cls->n_rules--;
}
276
277/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
278 * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules
279 * of equal priority match 'flow', returns one arbitrarily.
280 *
281 * (When multiple rules of equal priority happen to fall into the same bucket,
282 * rules added more recently take priority over rules added less recently, but
283 * this is subject to change and should not be depended upon.) */
284struct cls_rule *
285classifier_lookup(const struct classifier *cls, const flow_t *flow)
286{
287 struct cls_rule *rule = classifier_lookup_exact(cls, flow);
288 if (!rule) {
289 rule = classifier_lookup_wild(cls, flow);
290 }
291 return rule;
292}
293
294struct cls_rule *
295classifier_lookup_exact(const struct classifier *cls, const flow_t *flow)
296{
297 return (!hmap_is_empty(&cls->exact_table)
298 ? search_exact_table(cls, flow_hash(flow, 0), flow)
299 : NULL);
300}
301
/* Returns the highest-priority wildcarded rule in 'cls' that matches 'flow',
 * or a null pointer if there is none. */
struct cls_rule *
classifier_lookup_wild(const struct classifier *cls, const flow_t *flow)
{
    struct cls_rule *best = NULL;
    /* Only bother if at least one wildcarded rule exists (total rules minus
     * exact rules). */
    if (cls->n_rules > hmap_count(&cls->exact_table)) {
        struct cls_rule target;
        int i;

        cls_rule_from_flow(flow, 0, 0, &target);
        /* Check every per-field table and keep the best-priority match. */
        for (i = 0; i < CLS_N_FIELDS; i++) {
            struct cls_rule *rule = search_table(&cls->tables[i], i, &target);
            if (rule && (!best || rule->priority > best->priority)) {
                best = rule;
            }
        }
    }
    return best;
}
320
/* Finds the rule in 'cls' whose flow, wildcards, and priority all exactly
 * equal 'target', 'wildcards', and 'priority', or a null pointer if there is
 * no such rule.  ('priority' is ignored for exact-match rules, which have no
 * meaningful priority.) */
struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
                             const flow_t *target, uint32_t wildcards,
                             unsigned int priority)
{
    struct cls_bucket *bucket;
    int table_idx;
    uint32_t hash;

    if (!wildcards) {
        /* Ignores 'priority'. */
        return search_exact_table(cls, flow_hash(target, 0), target);
    }

    assert(wildcards == (wildcards & OVSFW_ALL));
    table_idx = table_idx_from_wildcards(wildcards);
    hash = hash_fields(target, table_idx);
    HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
                             &cls->tables[table_idx]) {
        if (equal_fields(&bucket->fixed, target, table_idx)) {
            struct cls_rule *pos;
            LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
                /* Bucket lists are sorted by decreasing priority, so we can
                 * stop as soon as we pass 'priority'. */
                if (pos->priority < priority) {
                    return NULL;
                } else if (pos->priority == priority &&
                           pos->wc.wildcards == wildcards &&
                           flow_equal(target, &pos->flow)) {
                    return pos;
                }
            }
        }
    }
    return NULL;
}
355
d295e8e9
JP
356/* Checks if the flow defined by 'target' with 'wildcards' at 'priority'
357 * overlaps with any other rule at the same priority in the classifier.
49bdc010
JP
358 * Two rules are considered overlapping if a packet could match both. */
359bool
360classifier_rule_overlaps(const struct classifier *cls,
361 const flow_t *target, uint32_t wildcards,
362 unsigned int priority)
363{
364 struct cls_rule target_rule;
365 const struct hmap *tbl;
366
367 if (!wildcards) {
368 return search_exact_table(cls, flow_hash(target, 0), target) ?
369 true : false;
370 }
371
659586ef 372 cls_rule_from_flow(target, wildcards, priority, &target_rule);
49bdc010
JP
373
374 for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
375 struct cls_bucket *bucket;
376
377 HMAP_FOR_EACH (bucket, struct cls_bucket, hmap_node, tbl) {
378 struct cls_rule *rule;
379
380 LIST_FOR_EACH (rule, struct cls_rule, node.list,
381 &bucket->rules) {
d295e8e9 382 if (rule->priority == priority
49bdc010
JP
383 && rules_match_2wild(rule, &target_rule, 0)) {
384 return true;
385 }
386 }
387 }
388 }
389
390 return false;
391}
392
064af421
BP
393/* Ignores target->priority.
394 *
395 * 'callback' is allowed to delete the rule that is passed as its argument, but
396 * it must not delete (or move) any other rules in 'cls' that are in the same
397 * table as the argument rule. Two rules are in the same table if their
398 * cls_rule structs have the same table_idx; as a special case, a rule with
399 * wildcards and an exact-match rule will never be in the same table. */
400void
401classifier_for_each_match(const struct classifier *cls,
402 const struct cls_rule *target,
403 int include, cls_cb_func *callback, void *aux)
404{
405 if (include & CLS_INC_WILD) {
406 const struct hmap *table;
407
408 for (table = &cls->tables[0]; table < &cls->tables[CLS_N_FIELDS];
409 table++) {
410 struct cls_bucket *bucket, *next_bucket;
411
412 HMAP_FOR_EACH_SAFE (bucket, next_bucket,
413 struct cls_bucket, hmap_node, table) {
414 /* XXX there is a bit of room for optimization here based on
415 * rejecting entire buckets on their fixed fields, but it will
416 * only be worthwhile for big buckets (which we hope we won't
417 * get anyway, but...) */
418 struct cls_rule *prev_rule, *rule;
419
420 /* We can't just use LIST_FOR_EACH_SAFE here because, if the
421 * callback deletes the last rule in the bucket, then the
422 * bucket itself will be destroyed. The bucket contains the
423 * list head so that's a use-after-free error. */
424 prev_rule = NULL;
425 LIST_FOR_EACH (rule, struct cls_rule, node.list,
426 &bucket->rules) {
427 if (rules_match_1wild(rule, target, 0)) {
428 if (prev_rule) {
429 callback(prev_rule, aux);
430 }
431 prev_rule = rule;
432 }
433 }
434 if (prev_rule) {
435 callback(prev_rule, aux);
436 }
437 }
438 }
439 }
440
441 if (include & CLS_INC_EXACT) {
442 if (target->wc.wildcards) {
443 struct cls_rule *rule, *next_rule;
444
445 HMAP_FOR_EACH_SAFE (rule, next_rule, struct cls_rule, node.hmap,
446 &cls->exact_table) {
447 if (rules_match_1wild(rule, target, 0)) {
448 callback(rule, aux);
449 }
450 }
451 } else {
452 /* Optimization: there can be at most one match in the exact
453 * table. */
454 size_t hash = flow_hash(&target->flow, 0);
455 struct cls_rule *rule = search_exact_table(cls, hash,
456 &target->flow);
457 if (rule) {
458 callback(rule, aux);
459 }
460 }
461 }
462}
463
/* Invokes 'callback' (with 'aux') for every rule in 'cls', filtered by
 * 'include' (CLS_INC_WILD and/or CLS_INC_EXACT).
 *
 * 'callback' is allowed to delete the rule that is passed as its argument, but
 * it must not delete (or move) any other rules in 'cls' that are in the same
 * table as the argument rule.  Two rules are in the same table if their
 * cls_rule structs have the same table_idx; as a special case, a rule with
 * wildcards and an exact-match rule will never be in the same table. */
void
classifier_for_each(const struct classifier *cls, int include,
                    void (*callback)(struct cls_rule *, void *aux),
                    void *aux)
{
    if (include & CLS_INC_WILD) {
        const struct hmap *tbl;

        for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
            struct cls_bucket *bucket, *next_bucket;

            HMAP_FOR_EACH_SAFE (bucket, next_bucket,
                                struct cls_bucket, hmap_node, tbl) {
                struct cls_rule *prev_rule, *rule;

                /* We can't just use LIST_FOR_EACH_SAFE here because, if the
                 * callback deletes the last rule in the bucket, then the
                 * bucket itself will be destroyed.  The bucket contains the
                 * list head so that's a use-after-free error.  Instead, defer
                 * each callback until we have advanced past its rule. */
                prev_rule = NULL;
                LIST_FOR_EACH (rule, struct cls_rule, node.list,
                               &bucket->rules) {
                    if (prev_rule) {
                        callback(prev_rule, aux);
                    }
                    prev_rule = rule;
                }
                if (prev_rule) {
                    callback(prev_rule, aux);
                }
            }
        }
    }

    if (include & CLS_INC_EXACT) {
        struct cls_rule *rule, *next_rule;

        HMAP_FOR_EACH_SAFE (rule, next_rule,
                            struct cls_rule, node.hmap, &cls->exact_table) {
            callback(rule, aux);
        }
    }
}
512\f
513static struct cls_bucket *create_bucket(struct hmap *, size_t hash,
514 const flow_t *fixed);
515static struct cls_rule *bucket_insert(struct cls_bucket *, struct cls_rule *);
516
517static inline bool equal_bytes(const void *, const void *, size_t n);
518
/* Returns a hash computed across the fields in 'flow' whose field indexes
 * (CLS_F_IDX_*) are less than 'table_idx'.  (If 'table_idx' is
 * CLS_F_IDX_EXACT, hashes all the fields in 'flow').
 *
 * The bytes of each included field are streamed through a 12-byte buffer
 * ('tmp') that is mixed into the Jenkins-style hash state whenever it
 * fills. */
static uint32_t
hash_fields(const flow_t *flow, int table_idx)
{
    /* I just know I'm going to hell for writing code this way.
     *
     * GCC generates pretty good code here, with only a single taken
     * conditional jump per execution.  Now the question is, would we be better
     * off marking this function ALWAYS_INLINE and writing a wrapper that
     * switches on the value of 'table_idx' to get rid of all the conditional
     * jumps entirely (except for one in the wrapper)?  Honestly I really,
     * really hope that it doesn't matter in practice.
     *
     * We could do better by calculating hashes incrementally, instead of
     * starting over from the top each time.  But that would be even uglier. */
    uint32_t a, b, c;
    uint32_t tmp[3];
    size_t n;

    a = b = c = 0xdeadbeef + table_idx;
    n = 0;

#define CLS_FIELD(WILDCARDS, MEMBER, NAME)                      \
    if (table_idx == CLS_F_IDX_##NAME) {                        \
        /* Done: zero-pad the remainder of 'tmp' and mix. */    \
        memset((uint8_t *) tmp + n, 0, sizeof tmp - n);         \
        goto finish;                                            \
    } else {                                                    \
        const size_t size = sizeof flow->MEMBER;                \
        const uint8_t *p1 = (const uint8_t *) &flow->MEMBER;    \
        const size_t p1_size = MIN(sizeof tmp - n, size);       \
        const uint8_t *p2 = p1 + p1_size;                       \
        const size_t p2_size = size - p1_size;                  \
                                                                \
        /* Append to 'tmp' as much data as will fit. */         \
        memcpy((uint8_t *) tmp + n, p1, p1_size);               \
        n += p1_size;                                           \
                                                                \
        /* If 'tmp' is full, mix. */                            \
        if (n == sizeof tmp) {                                  \
            a += tmp[0];                                        \
            b += tmp[1];                                        \
            c += tmp[2];                                        \
            HASH_MIX(a, b, c);                                  \
            n = 0;                                              \
        }                                                       \
                                                                \
        /* Append to 'tmp' any data that didn't fit. */         \
        memcpy(tmp, p2, p2_size);                               \
        n += p2_size;                                           \
    }
    CLS_FIELDS
#undef CLS_FIELD

finish:
    a += tmp[0];
    b += tmp[1];
    c += tmp[2];
    HASH_FINAL(a, b, c);
    return c;
}
582
/* Compares the fields in 'a' and 'b' whose field indexes (CLS_F_IDX_*) are
 * less than 'table_idx'.  (If 'table_idx' is CLS_F_IDX_EXACT, compares all the
 * fields in 'a' and 'b').
 *
 * Returns true if all the compared fields are equal, false otherwise. */
static bool
equal_fields(const flow_t *a, const flow_t *b, int table_idx)
{
    /* XXX The generated code could be better here. */
#define CLS_FIELD(WILDCARDS, MEMBER, NAME)                              \
    if (table_idx == CLS_F_IDX_##NAME) {                                \
        /* Reached the first excluded field: everything before matched. */ \
        return true;                                                    \
    } else if (!equal_bytes(&a->MEMBER, &b->MEMBER, sizeof a->MEMBER)) { \
        return false;                                                   \
    }
    CLS_FIELDS
#undef CLS_FIELD

    return true;
}
603
/* Returns the index of the table that should hold a rule with the given
 * 'wildcards': CLS_F_IDX_EXACT for an exact-match rule, otherwise the index
 * of the first (lowest-numbered) wildcarded field. */
static int
table_idx_from_wildcards(uint32_t wildcards)
{
    if (!wildcards) {
        return CLS_F_IDX_EXACT;
    }
#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
    if (wildcards & WILDCARDS) {           \
        return CLS_F_IDX_##NAME;           \
    }
    CLS_FIELDS
#undef CLS_FIELD
    NOT_REACHED();
}
618
/* Inserts 'rule' into 'table'.  Returns the rule, if any, that was displaced
 * in favor of 'rule'.  Creates the rule's bucket if it does not yet exist. */
static struct cls_rule *
table_insert(struct hmap *table, struct cls_rule *rule)
{
    struct cls_bucket *bucket;
    size_t hash;

    hash = hash_fields(&rule->flow, rule->table_idx);
    bucket = find_bucket(table, hash, rule);
    if (!bucket) {
        bucket = create_bucket(table, hash, &rule->flow);
    }

    return bucket_insert(bucket, rule);
}
635
/* Inserts 'rule' into 'bucket', keeping the bucket's rule list sorted by
 * decreasing priority.  (rule->table_idx identifies the first wildcarded
 * field in 'rule'.)
 *
 * Returns the rule, if any, that was displaced in favor of 'rule'. */
static struct cls_rule *
bucket_insert(struct cls_bucket *bucket, struct cls_rule *rule)
{
    struct cls_rule *pos;
    LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
        if (pos->priority == rule->priority) {
            /* Same priority: replace an existing rule only if it has the
             * same wildcards and its fixed fields match. */
            if (pos->wc.wildcards == rule->wc.wildcards
                && rules_match_1wild(pos, rule, rule->table_idx))
            {
                list_replace(&rule->node.list, &pos->node.list);
                return pos;
            }
        } else if (pos->priority < rule->priority) {
            /* Found the insertion point in the priority-sorted list. */
            break;
        }
    }
    /* Insert 'rule' just before 'pos' (or at the tail if we fell off the
     * end of the loop). */
    list_insert(&pos->node.list, &rule->node.list);
    return NULL;
}
659
660static struct cls_rule *
661insert_exact_rule(struct classifier *cls, struct cls_rule *rule)
662{
663 struct cls_rule *old_rule;
664 size_t hash;
665
666 hash = flow_hash(&rule->flow, 0);
667 old_rule = search_exact_table(cls, hash, &rule->flow);
668 if (old_rule) {
669 hmap_remove(&cls->exact_table, &old_rule->node.hmap);
670 }
671 hmap_insert(&cls->exact_table, &rule->node.hmap, hash);
672 return old_rule;
673}
674
/* Returns the bucket in 'table' that has the given 'hash' and the same fields
 * as 'rule->flow' (up to 'rule->table_idx'), or a null pointer if no bucket
 * matches. */
static struct cls_bucket *
find_bucket(struct hmap *table, size_t hash, const struct cls_rule *rule)
{
    struct cls_bucket *bucket;
    HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
                             table) {
        /* Hash collisions are possible, so confirm the fixed fields too. */
        if (equal_fields(&bucket->fixed, &rule->flow, rule->table_idx)) {
            return bucket;
        }
    }
    return NULL;
}
690
691/* Creates a bucket and inserts it in 'table' with the given 'hash' and 'fixed'
692 * values. Returns the new bucket. */
693static struct cls_bucket *
694create_bucket(struct hmap *table, size_t hash, const flow_t *fixed)
695{
696 struct cls_bucket *bucket = xmalloc(sizeof *bucket);
697 list_init(&bucket->rules);
698 bucket->fixed = *fixed;
699 hmap_insert(table, &bucket->hmap_node, hash);
700 return bucket;
701}
702
/* Returns true if the 'n' bytes in 'a' and 'b' are equal, false otherwise. */
static inline bool ALWAYS_INLINE
equal_bytes(const void *a, const void *b, size_t n)
{
#ifdef __i386__
    /* For some reason GCC generates stupid code for memcmp() of small
     * constant integer lengths.  Help it out.
     *
     * This function is always inlined, and it is always called with 'n' as a
     * compile-time constant, so the switch statement gets optimized out and
     * this whole function just expands to an instruction or two.
     *
     * NOTE(review): the casts below assume i386 tolerates misaligned loads
     * and rely on GCC's lenient treatment of type-punning; they would be
     * strict-aliasing/alignment UB on stricter targets, which is why this
     * path is gated on __i386__. */
    switch (n) {
    case 1:
        return *(uint8_t *) a == *(uint8_t *) b;

    case 2:
        return *(uint16_t *) a == *(uint16_t *) b;

    case 4:
        return *(uint32_t *) a == *(uint32_t *) b;

    case 6:
        return (*(uint32_t *) a == *(uint32_t *) b
                && ((uint16_t *) a)[2] == ((uint16_t *) b)[2]);

    default:
        abort();
    }
#else
    /* I hope GCC is smarter on your platform. */
    return !memcmp(a, b, n);
#endif
}
736
/* Returns the 32-bit unsigned integer stored at 'p', which need not be
 * aligned. */
static inline uint32_t
read_uint32(const void *p)
{
    uint32_t value;

    /* memcpy sidesteps alignment and strict-aliasing issues; GCC folds it
     * into a single load on x86. */
    memcpy(&value, p, sizeof value);
    return value;
}
746
/* Compares the specified field in 'a' and 'b'.  Returns true if the fields are
 * equal, or if the ofp_match wildcard bits in 'wildcards' are set such that
 * non-equal values may be ignored.  'nw_src_mask' and 'nw_dst_mask' must be
 * those that would be set for 'wildcards' by cls_rule_set_masks().
 *
 * The compared field is the one with wildcard bit or bits 'field_wc', offset
 * 'ofs' within flow_t, and length 'len', in bytes. */
static inline bool ALWAYS_INLINE
field_matches(const flow_t *a_, const flow_t *b_,
              uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask,
              uint32_t field_wc, int ofs, int len)
{
    /* This function is always inlined, and it is always called with 'field_wc'
     * as a compile-time constant, so the "if" conditionals here generate no
     * code. */
    const void *a = (const uint8_t *) a_ + ofs;
    const void *b = (const uint8_t *) b_ + ofs;
    if (!(field_wc & (field_wc - 1))) {
        /* Handle all the single-bit wildcard cases. */
        return wildcards & field_wc || equal_bytes(a, b, len);
    } else if (field_wc == OFPFW_NW_SRC_MASK ||
               field_wc == OFPFW_NW_DST_MASK) {
        /* IP addresses use multi-bit CIDR-style masks rather than a single
         * all-or-nothing wildcard bit. */
        uint32_t a_ip = read_uint32(a);
        uint32_t b_ip = read_uint32(b);
        uint32_t mask = (field_wc == OFPFW_NW_SRC_MASK
                         ? nw_src_mask : nw_dst_mask);
        return ((a_ip ^ b_ip) & mask) == 0;
    } else {
        abort();
    }
}
778
/* Returns true if 'a' and 'b' match, ignoring fields for which the wildcards
 * in 'wildcards' are set.  'nw_src_mask' and 'nw_dst_mask' must be those that
 * would be set for 'wildcards' by cls_rule_set_masks().  'field_idx' is the
 * index of the first field to be compared; fields before 'field_idx' are
 * assumed to match.  (Always returns true if 'field_idx' is CLS_N_FIELDS.) */
static bool
rules_match(const struct cls_rule *a, const struct cls_rule *b,
            uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask,
            int field_idx)
{
    /* This is related to Duff's device (see
     * http://en.wikipedia.org/wiki/Duff's_device): the switch jumps into the
     * middle of a sequence of fall-through cases, so comparison starts at
     * 'field_idx' and runs through the last field. */
    switch (field_idx) {
#define CLS_FIELD(WILDCARDS, MEMBER, NAME)                          \
        case CLS_F_IDX_##NAME:                                      \
            if (!field_matches(&a->flow, &b->flow,                  \
                               wildcards, nw_src_mask, nw_dst_mask, \
                               WILDCARDS, offsetof(flow_t, MEMBER), \
                               sizeof a->flow.MEMBER)) {            \
                return false;                                       \
            }                                                       \
        /* Fall through. */
        CLS_FIELDS
#undef CLS_FIELD
    }
    return true;
}
806
/* Returns true if 'fixed' and 'wild' match.  All fields in 'fixed' must have
 * fixed values; 'wild' may contain wildcards.
 *
 * 'field_idx' is the index of the first field to be compared; fields before
 * 'field_idx' are assumed to match.  Always returns true if 'field_idx' is
 * CLS_N_FIELDS. */
static bool
rules_match_1wild(const struct cls_rule *fixed, const struct cls_rule *wild,
                  int field_idx)
{
    return rules_match(fixed, wild, wild->wc.wildcards, wild->wc.nw_src_mask,
                       wild->wc.nw_dst_mask, field_idx);
}
820
/* Returns true if 'wild1' and 'wild2' match, that is, if their fields
 * are equal modulo wildcards in 'wild1' or 'wild2'.
 *
 * 'field_idx' is the index of the first field to be compared; fields before
 * 'field_idx' are assumed to match.  Always returns true if 'field_idx' is
 * CLS_N_FIELDS. */
static bool
rules_match_2wild(const struct cls_rule *wild1, const struct cls_rule *wild2,
                  int field_idx)
{
    /* A field is ignored if either rule wildcards it (bitwise OR); an IP
     * address bit is ignored if either mask clears it (bitwise AND). */
    return rules_match(wild1, wild2,
                       wild1->wc.wildcards | wild2->wc.wildcards,
                       wild1->wc.nw_src_mask & wild2->wc.nw_src_mask,
                       wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask,
                       field_idx);
}
837
/* Searches 'bucket' for a rule that matches 'target'.  Returns the
 * highest-priority match, if one is found, or a null pointer if there is no
 * match.
 *
 * 'field_idx' must be the index of the first wildcarded field in 'bucket'. */
static struct cls_rule *
search_bucket(struct cls_bucket *bucket, int field_idx,
              const struct cls_rule *target)
{
    struct cls_rule *pos;

    if (!equal_fields(&bucket->fixed, &target->flow, field_idx)) {
        return NULL;
    }

    /* The list is sorted by decreasing priority, so the first match is the
     * highest-priority one. */
    LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
        if (rules_match_1wild(target, pos, field_idx)) {
            return pos;
        }
    }
    return NULL;
}
860
/* Searches 'table' for a rule that matches 'target'.  Returns the
 * highest-priority match, if one is found, or a null pointer if there is no
 * match.
 *
 * 'field_idx' must be the index of the first wildcarded field in 'table'. */
static struct cls_rule *
search_table(const struct hmap *table, int field_idx,
             const struct cls_rule *target)
{
    struct cls_bucket *bucket;

    switch (hmap_count(table)) {
        /* In these special cases there's no need to hash. */
    case 0:
        return NULL;
    case 1:
        bucket = CONTAINER_OF(hmap_first(table), struct cls_bucket, hmap_node);
        return search_bucket(bucket, field_idx, target);
    }

    HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node,
                             hash_fields(&target->flow, field_idx), table) {
        struct cls_rule *rule = search_bucket(bucket, field_idx, target);
        if (rule) {
            return rule;
        }
    }
    return NULL;
}
890
/* Searches the classifier's exact-match table for a rule whose flow equals
 * 'target'.  'hash' must be flow_hash(target, 0).  Returns the rule, or a
 * null pointer if there is none. */
static struct cls_rule *
search_exact_table(const struct classifier *cls, size_t hash,
                   const flow_t *target)
{
    struct cls_rule *rule;

    HMAP_FOR_EACH_WITH_HASH (rule, struct cls_rule, node.hmap,
                             hash, &cls->exact_table) {
        /* Hash collisions are possible, so confirm full flow equality. */
        if (flow_equal(&rule->flow, target)) {
            return rule;
        }
    }
    return NULL;
}