]> git.proxmox.com Git - mirror_ovs.git/blob - tests/test-classifier.c
command-line: add ovs_cmdl_context
[mirror_ovs.git] / tests / test-classifier.c
1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 /* "White box" tests for classifier.
18 *
19 * With very few exceptions, these tests obtain complete coverage of every
20 * basic block and every branch in the classifier implementation, e.g. a clean
21 * report from "gcov -b". (Covering the exceptions would require finding
22 * collisions in the hash function used for flow data, etc.)
23 *
24 * This test should receive a clean report from "valgrind --leak-check=full":
25 * it frees every heap block that it allocates.
26 */
27
28 #include <config.h>
29 #undef NDEBUG
30 #include "classifier.h"
31 #include <assert.h>
32 #include <errno.h>
33 #include <limits.h>
34 #include "byte-order.h"
35 #include "classifier-private.h"
36 #include "command-line.h"
37 #include "flow.h"
38 #include "ofp-util.h"
39 #include "ovstest.h"
40 #include "packets.h"
41 #include "random.h"
42 #include "unaligned.h"
43 #include "util.h"
44
45 /* Fields in a rule. */
46 #define CLS_FIELDS \
47 /* struct flow all-caps */ \
48 /* member name name */ \
49 /* ----------- -------- */ \
50 CLS_FIELD(tunnel.tun_id, TUN_ID) \
51 CLS_FIELD(metadata, METADATA) \
52 CLS_FIELD(nw_src, NW_SRC) \
53 CLS_FIELD(nw_dst, NW_DST) \
54 CLS_FIELD(in_port, IN_PORT) \
55 CLS_FIELD(vlan_tci, VLAN_TCI) \
56 CLS_FIELD(dl_type, DL_TYPE) \
57 CLS_FIELD(tp_src, TP_SRC) \
58 CLS_FIELD(tp_dst, TP_DST) \
59 CLS_FIELD(dl_src, DL_SRC) \
60 CLS_FIELD(dl_dst, DL_DST) \
61 CLS_FIELD(nw_proto, NW_PROTO) \
62 CLS_FIELD(nw_tos, NW_DSCP)
63
64 /* Field indexes.
65 *
66 * (These are also indexed into struct classifier's 'tables' array.) */
67 enum {
68 #define CLS_FIELD(MEMBER, NAME) CLS_F_IDX_##NAME,
69 CLS_FIELDS
70 #undef CLS_FIELD
71 CLS_N_FIELDS
72 };
73
74 /* Field information. */
75 struct cls_field {
76 int ofs; /* Offset in struct flow. */
77 int len; /* Length in bytes. */
78 const char *name; /* Name (for debugging). */
79 };
80
81 static const struct cls_field cls_fields[CLS_N_FIELDS] = {
82 #define CLS_FIELD(MEMBER, NAME) \
83 { offsetof(struct flow, MEMBER), \
84 sizeof ((struct flow *)0)->MEMBER, \
85 #NAME },
86 CLS_FIELDS
87 #undef CLS_FIELD
88 };
89
90 struct test_rule {
91 int aux; /* Auxiliary data. */
92 struct cls_rule cls_rule; /* Classifier rule data. */
93 };
94
95 static struct test_rule *
96 test_rule_from_cls_rule(const struct cls_rule *rule)
97 {
98 return rule ? CONTAINER_OF(rule, struct test_rule, cls_rule) : NULL;
99 }
100
101 static void
102 test_rule_destroy(struct test_rule *rule)
103 {
104 if (rule) {
105 cls_rule_destroy(&rule->cls_rule);
106 free(rule);
107 }
108 }
109
110 static struct test_rule *make_rule(int wc_fields, int priority, int value_pat);
111 static void free_rule(struct test_rule *);
112 static struct test_rule *clone_rule(const struct test_rule *);
113
114 /* Trivial (linear) classifier. */
115 struct tcls {
116 size_t n_rules;
117 size_t allocated_rules;
118 struct test_rule **rules;
119 };
120
121 static void
122 tcls_init(struct tcls *tcls)
123 {
124 tcls->n_rules = 0;
125 tcls->allocated_rules = 0;
126 tcls->rules = NULL;
127 }
128
129 static void
130 tcls_destroy(struct tcls *tcls)
131 {
132 if (tcls) {
133 size_t i;
134
135 for (i = 0; i < tcls->n_rules; i++) {
136 test_rule_destroy(tcls->rules[i]);
137 }
138 free(tcls->rules);
139 }
140 }
141
142 static bool
143 tcls_is_empty(const struct tcls *tcls)
144 {
145 return tcls->n_rules == 0;
146 }
147
/* Inserts a clone of 'rule' into 'tcls', keeping the rule array sorted by
 * nonincreasing priority.  If 'tcls' already contains a rule with an equal
 * match and priority, replaces it instead, scheduling the old clone to be
 * freed after an RCU grace period.  Returns the newly inserted clone. */
static struct test_rule *
tcls_insert(struct tcls *tcls, const struct test_rule *rule)
{
    size_t i;

    for (i = 0; i < tcls->n_rules; i++) {
        const struct cls_rule *pos = &tcls->rules[i]->cls_rule;
        if (cls_rule_equal(pos, &rule->cls_rule)) {
            /* Exact match. */
            ovsrcu_postpone(free_rule, tcls->rules[i]);
            tcls->rules[i] = clone_rule(rule);
            return tcls->rules[i];
        } else if (pos->priority < rule->cls_rule.priority) {
            /* 'i' is the first slot with lower priority: insert here. */
            break;
        }
    }

    if (tcls->n_rules >= tcls->allocated_rules) {
        tcls->rules = x2nrealloc(tcls->rules, &tcls->allocated_rules,
                                 sizeof *tcls->rules);
    }
    if (i != tcls->n_rules) {
        /* Shift lower-priority rules up one slot to open position 'i'. */
        memmove(&tcls->rules[i + 1], &tcls->rules[i],
                sizeof *tcls->rules * (tcls->n_rules - i));
    }
    tcls->rules[i] = clone_rule(rule);
    tcls->n_rules++;
    return tcls->rules[i];
}
177
/* Removes 'rule' from 'cls' and frees it.  'rule' must be present (aborts
 * otherwise). */
static void
tcls_remove(struct tcls *cls, const struct test_rule *rule)
{
    size_t i;

    for (i = 0; i < cls->n_rules; i++) {
        struct test_rule *pos = cls->rules[i];
        if (pos == rule) {
            test_rule_destroy(pos);

            /* Close the gap left at index 'i'. */
            memmove(&cls->rules[i], &cls->rules[i + 1],
                    sizeof *cls->rules * (cls->n_rules - i - 1));

            cls->n_rules--;
            return;
        }
    }
    OVS_NOT_REACHED();
}
197
/* Returns true if the concrete flow 'fixed' matches the wildcarded rule
 * 'wild_' on every field that this test covers (CLS_FIELDS): for each field,
 * the bits selected by the rule's mask must be equal in 'fixed' and in the
 * rule's flow.  (XOR then AND with the mask yields 0 exactly when the masked
 * bits agree.) */
static bool
match(const struct cls_rule *wild_, const struct flow *fixed)
{
    struct match wild;
    int f_idx;

    minimatch_expand(&wild_->match, &wild);
    for (f_idx = 0; f_idx < CLS_N_FIELDS; f_idx++) {
        bool eq;

        if (f_idx == CLS_F_IDX_NW_SRC) {
            eq = !((fixed->nw_src ^ wild.flow.nw_src)
                   & wild.wc.masks.nw_src);
        } else if (f_idx == CLS_F_IDX_NW_DST) {
            eq = !((fixed->nw_dst ^ wild.flow.nw_dst)
                   & wild.wc.masks.nw_dst);
        } else if (f_idx == CLS_F_IDX_TP_SRC) {
            eq = !((fixed->tp_src ^ wild.flow.tp_src)
                   & wild.wc.masks.tp_src);
        } else if (f_idx == CLS_F_IDX_TP_DST) {
            eq = !((fixed->tp_dst ^ wild.flow.tp_dst)
                   & wild.wc.masks.tp_dst);
        } else if (f_idx == CLS_F_IDX_DL_SRC) {
            eq = eth_addr_equal_except(fixed->dl_src, wild.flow.dl_src,
                                       wild.wc.masks.dl_src);
        } else if (f_idx == CLS_F_IDX_DL_DST) {
            eq = eth_addr_equal_except(fixed->dl_dst, wild.flow.dl_dst,
                                       wild.wc.masks.dl_dst);
        } else if (f_idx == CLS_F_IDX_VLAN_TCI) {
            eq = !((fixed->vlan_tci ^ wild.flow.vlan_tci)
                   & wild.wc.masks.vlan_tci);
        } else if (f_idx == CLS_F_IDX_TUN_ID) {
            eq = !((fixed->tunnel.tun_id ^ wild.flow.tunnel.tun_id)
                   & wild.wc.masks.tunnel.tun_id);
        } else if (f_idx == CLS_F_IDX_METADATA) {
            eq = !((fixed->metadata ^ wild.flow.metadata)
                   & wild.wc.masks.metadata);
        } else if (f_idx == CLS_F_IDX_NW_DSCP) {
            /* Only the DSCP bits of nw_tos participate in this field. */
            eq = !((fixed->nw_tos ^ wild.flow.nw_tos) &
                   (wild.wc.masks.nw_tos & IP_DSCP_MASK));
        } else if (f_idx == CLS_F_IDX_NW_PROTO) {
            eq = !((fixed->nw_proto ^ wild.flow.nw_proto)
                   & wild.wc.masks.nw_proto);
        } else if (f_idx == CLS_F_IDX_DL_TYPE) {
            eq = !((fixed->dl_type ^ wild.flow.dl_type)
                   & wild.wc.masks.dl_type);
        } else if (f_idx == CLS_F_IDX_IN_PORT) {
            eq = !((fixed->in_port.ofp_port
                    ^ wild.flow.in_port.ofp_port)
                   & wild.wc.masks.in_port.ofp_port);
        } else {
            OVS_NOT_REACHED();
        }

        if (!eq) {
            return false;
        }
    }
    return true;
}
258
259 static struct cls_rule *
260 tcls_lookup(const struct tcls *cls, const struct flow *flow)
261 {
262 size_t i;
263
264 for (i = 0; i < cls->n_rules; i++) {
265 struct test_rule *pos = cls->rules[i];
266 if (match(&pos->cls_rule, flow)) {
267 return &pos->cls_rule;
268 }
269 }
270 return NULL;
271 }
272
/* Removes from 'cls' every rule whose mask has no bits beyond 'target''s
 * mask and whose flow matches 'target' under 'target''s mask. */
static void
tcls_delete_matches(struct tcls *cls, const struct cls_rule *target)
{
    size_t i;

    for (i = 0; i < cls->n_rules; ) {
        struct test_rule *pos = cls->rules[i];
        if (!minimask_has_extra(&pos->cls_rule.match.mask,
                                &target->match.mask)) {
            struct flow flow;

            miniflow_expand(&pos->cls_rule.match.flow, &flow);
            if (match(target, &flow)) {
                tcls_remove(cls, pos);
                /* Do not advance 'i': tcls_remove() shifted the next rule
                 * into slot 'i'. */
                continue;
            }
        }
        i++;
    }
}
293 \f
294 static ovs_be32 nw_src_values[] = { CONSTANT_HTONL(0xc0a80001),
295 CONSTANT_HTONL(0xc0a04455) };
296 static ovs_be32 nw_dst_values[] = { CONSTANT_HTONL(0xc0a80002),
297 CONSTANT_HTONL(0xc0a04455) };
298 static ovs_be64 tun_id_values[] = {
299 0,
300 CONSTANT_HTONLL(UINT64_C(0xfedcba9876543210)) };
301 static ovs_be64 metadata_values[] = {
302 0,
303 CONSTANT_HTONLL(UINT64_C(0xfedcba9876543210)) };
304 static ofp_port_t in_port_values[] = { OFP_PORT_C(1), OFPP_LOCAL };
305 static ovs_be16 vlan_tci_values[] = { CONSTANT_HTONS(101), CONSTANT_HTONS(0) };
306 static ovs_be16 dl_type_values[]
307 = { CONSTANT_HTONS(ETH_TYPE_IP), CONSTANT_HTONS(ETH_TYPE_ARP) };
308 static ovs_be16 tp_src_values[] = { CONSTANT_HTONS(49362),
309 CONSTANT_HTONS(80) };
310 static ovs_be16 tp_dst_values[] = { CONSTANT_HTONS(6667), CONSTANT_HTONS(22) };
311 static uint8_t dl_src_values[][ETH_ADDR_LEN] = {
312 { 0x00, 0x02, 0xe3, 0x0f, 0x80, 0xa4 },
313 { 0x5e, 0x33, 0x7f, 0x5f, 0x1e, 0x99 } };
314 static uint8_t dl_dst_values[][ETH_ADDR_LEN] = {
315 { 0x4a, 0x27, 0x71, 0xae, 0x64, 0xc1 },
316 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
317 static uint8_t nw_proto_values[] = { IPPROTO_TCP, IPPROTO_ICMP };
318 static uint8_t nw_dscp_values[] = { 48, 0 };
319
320 static void *values[CLS_N_FIELDS][2];
321
/* Fills 'values' with the addresses of the two sample values for each
 * classifier field, so that values[f_idx][value_idx] can be memcpy'd into a
 * flow by make_rule(). */
static void
init_values(void)
{
    values[CLS_F_IDX_TUN_ID][0] = &tun_id_values[0];
    values[CLS_F_IDX_TUN_ID][1] = &tun_id_values[1];

    values[CLS_F_IDX_METADATA][0] = &metadata_values[0];
    values[CLS_F_IDX_METADATA][1] = &metadata_values[1];

    values[CLS_F_IDX_IN_PORT][0] = &in_port_values[0];
    values[CLS_F_IDX_IN_PORT][1] = &in_port_values[1];

    values[CLS_F_IDX_VLAN_TCI][0] = &vlan_tci_values[0];
    values[CLS_F_IDX_VLAN_TCI][1] = &vlan_tci_values[1];

    /* MAC addresses are arrays, so no '&'. */
    values[CLS_F_IDX_DL_SRC][0] = dl_src_values[0];
    values[CLS_F_IDX_DL_SRC][1] = dl_src_values[1];

    values[CLS_F_IDX_DL_DST][0] = dl_dst_values[0];
    values[CLS_F_IDX_DL_DST][1] = dl_dst_values[1];

    values[CLS_F_IDX_DL_TYPE][0] = &dl_type_values[0];
    values[CLS_F_IDX_DL_TYPE][1] = &dl_type_values[1];

    values[CLS_F_IDX_NW_SRC][0] = &nw_src_values[0];
    values[CLS_F_IDX_NW_SRC][1] = &nw_src_values[1];

    values[CLS_F_IDX_NW_DST][0] = &nw_dst_values[0];
    values[CLS_F_IDX_NW_DST][1] = &nw_dst_values[1];

    values[CLS_F_IDX_NW_PROTO][0] = &nw_proto_values[0];
    values[CLS_F_IDX_NW_PROTO][1] = &nw_proto_values[1];

    values[CLS_F_IDX_NW_DSCP][0] = &nw_dscp_values[0];
    values[CLS_F_IDX_NW_DSCP][1] = &nw_dscp_values[1];

    values[CLS_F_IDX_TP_SRC][0] = &tp_src_values[0];
    values[CLS_F_IDX_TP_SRC][1] = &tp_src_values[1];

    values[CLS_F_IDX_TP_DST][0] = &tp_dst_values[0];
    values[CLS_F_IDX_TP_DST][1] = &tp_dst_values[1];
}
364
365 #define N_NW_SRC_VALUES ARRAY_SIZE(nw_src_values)
366 #define N_NW_DST_VALUES ARRAY_SIZE(nw_dst_values)
367 #define N_TUN_ID_VALUES ARRAY_SIZE(tun_id_values)
368 #define N_METADATA_VALUES ARRAY_SIZE(metadata_values)
369 #define N_IN_PORT_VALUES ARRAY_SIZE(in_port_values)
370 #define N_VLAN_TCI_VALUES ARRAY_SIZE(vlan_tci_values)
371 #define N_DL_TYPE_VALUES ARRAY_SIZE(dl_type_values)
372 #define N_TP_SRC_VALUES ARRAY_SIZE(tp_src_values)
373 #define N_TP_DST_VALUES ARRAY_SIZE(tp_dst_values)
374 #define N_DL_SRC_VALUES ARRAY_SIZE(dl_src_values)
375 #define N_DL_DST_VALUES ARRAY_SIZE(dl_dst_values)
376 #define N_NW_PROTO_VALUES ARRAY_SIZE(nw_proto_values)
377 #define N_NW_DSCP_VALUES ARRAY_SIZE(nw_dscp_values)
378
/* Total number of distinct sample flows.  compare_classifiers() decodes one
 * mixed-radix digit per field from a value in [0, N_FLOW_VALUES), so the
 * product must include every field it sets -- including metadata, which was
 * previously missing and left the last digit (nw_tos) always zero. */
#define N_FLOW_VALUES (N_NW_SRC_VALUES *        \
                       N_NW_DST_VALUES *        \
                       N_TUN_ID_VALUES *        \
                       N_METADATA_VALUES *      \
                       N_IN_PORT_VALUES *       \
                       N_VLAN_TCI_VALUES *      \
                       N_DL_TYPE_VALUES *       \
                       N_TP_SRC_VALUES *        \
                       N_TP_DST_VALUES *        \
                       N_DL_SRC_VALUES *        \
                       N_DL_DST_VALUES *        \
                       N_NW_PROTO_VALUES *      \
                       N_NW_DSCP_VALUES)
391
/* Extracts one mixed-radix digit from '*x': returns '*x' modulo 'n_values'
 * and divides '*x' by 'n_values' so that successive calls peel off
 * successive digits. */
static unsigned int
get_value(unsigned int *x, unsigned n_values)
{
    unsigned int digit = *x % n_values;

    *x /= n_values;
    return digit;
}
399
/* Checks that 'cls' and the trivial classifier 'tcls' contain the same
 * number of rules and produce identical lookup results for a random sample
 * of 'confidence' flows drawn from the space of sample field values. */
static void
compare_classifiers(struct classifier *cls, struct tcls *tcls)
{
    static const int confidence = 500; /* Number of random flows to try. */
    unsigned int i;

    assert(classifier_count(cls) == tcls->n_rules);
    for (i = 0; i < confidence; i++) {
        const struct cls_rule *cr0, *cr1, *cr2;
        struct flow flow;
        struct flow_wildcards wc;
        unsigned int x;

        flow_wildcards_init_catchall(&wc);
        /* Decode 'x' as a mixed-radix number, one digit per field. */
        x = random_range(N_FLOW_VALUES);
        memset(&flow, 0, sizeof flow);
        flow.nw_src = nw_src_values[get_value(&x, N_NW_SRC_VALUES)];
        flow.nw_dst = nw_dst_values[get_value(&x, N_NW_DST_VALUES)];
        flow.tunnel.tun_id = tun_id_values[get_value(&x, N_TUN_ID_VALUES)];
        flow.metadata = metadata_values[get_value(&x, N_METADATA_VALUES)];
        flow.in_port.ofp_port = in_port_values[get_value(&x,
                                                   N_IN_PORT_VALUES)];
        flow.vlan_tci = vlan_tci_values[get_value(&x, N_VLAN_TCI_VALUES)];
        flow.dl_type = dl_type_values[get_value(&x, N_DL_TYPE_VALUES)];
        flow.tp_src = tp_src_values[get_value(&x, N_TP_SRC_VALUES)];
        flow.tp_dst = tp_dst_values[get_value(&x, N_TP_DST_VALUES)];
        memcpy(flow.dl_src, dl_src_values[get_value(&x, N_DL_SRC_VALUES)],
               ETH_ADDR_LEN);
        memcpy(flow.dl_dst, dl_dst_values[get_value(&x, N_DL_DST_VALUES)],
               ETH_ADDR_LEN);
        flow.nw_proto = nw_proto_values[get_value(&x, N_NW_PROTO_VALUES)];
        flow.nw_tos = nw_dscp_values[get_value(&x, N_NW_DSCP_VALUES)];

        /* This assertion is here to suppress a GCC 4.9 array-bounds warning */
        ovs_assert(cls->n_tries <= CLS_MAX_TRIES);

        /* Both classifiers must agree on whether a match exists, and on
         * which rule wins when one does. */
        cr0 = classifier_lookup(cls, &flow, &wc);
        cr1 = tcls_lookup(tcls, &flow);
        assert((cr0 == NULL) == (cr1 == NULL));
        if (cr0 != NULL) {
            const struct test_rule *tr0 = test_rule_from_cls_rule(cr0);
            const struct test_rule *tr1 = test_rule_from_cls_rule(cr1);

            assert(cls_rule_equal(cr0, cr1));
            assert(tr0->aux == tr1->aux);
        }
        /* Lookup without wildcard collection must give the same result. */
        cr2 = classifier_lookup(cls, &flow, NULL);
        assert(cr2 == cr0);
    }
}
450
/* Removes every rule from 'cls', scheduling each removed rule to be freed
 * after an RCU grace period, and then destroys 'cls' itself.  Removals are
 * batched via classifier_defer(). */
static void
destroy_classifier(struct classifier *cls)
{
    struct test_rule *rule;

    classifier_defer(cls);
    CLS_FOR_EACH (rule, cls_rule, cls) {
        if (classifier_remove(cls, &rule->cls_rule)) {
            ovsrcu_postpone(free_rule, rule);
        }
    }
    classifier_destroy(cls);
}
464
465 static void
466 pvector_verify(const struct pvector *pvec)
467 {
468 void *ptr OVS_UNUSED;
469 int prev_priority = INT_MAX;
470
471 PVECTOR_FOR_EACH (ptr, pvec) {
472 int priority = cursor__.vector[cursor__.entry_idx].priority;
473 if (priority > prev_priority) {
474 ovs_abort(0, "Priority vector is out of order (%u > %u)",
475 priority, prev_priority);
476 }
477 prev_priority = priority;
478 }
479 }
480
/* Recursively verifies the prefix-trie subtree rooted at 'trie'.  'ofs' is
 * the number of prefix bits consumed by ancestors of this node; 'n_bits' is
 * the total width of the tracked field.  Returns the number of rules stored
 * in the subtree. */
static unsigned int
trie_verify(const rcu_trie_ptr *trie, unsigned int ofs, unsigned int n_bits)
{
    const struct trie_node *node = ovsrcu_get(struct trie_node *, trie);

    if (node) {
        /* A node that holds rules must span at least one bit. */
        assert(node->n_rules == 0 || node->n_bits > 0);
        ofs += node->n_bits;
        /* The accumulated prefix must stay within the field width. */
        assert((ofs > 0 || (ofs == 0 && node->n_bits == 0)) && ofs <= n_bits);

        return node->n_rules
            + trie_verify(&node->edges[0], ofs, n_bits)
            + trie_verify(&node->edges[1], ofs, n_bits);
    }
    return 0;
}
497
/* Verifies the invariants of every prefix trie in 'cls' and checks that the
 * tries together do not account for more rules than 'cls' contains. */
static void
verify_tries(struct classifier *cls)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    unsigned int n_rules = 0;
    int i;

    for (i = 0; i < cls->n_tries; i++) {
        n_rules += trie_verify(&cls->tries[i].root, 0,
                               cls->tries[i].field->n_bits);
    }
    /* Not every rule participates in every trie, so "<=" rather than "==". */
    assert(n_rules <= cls->n_rules);
}
511
/* Verifies the internal consistency of 'cls': the 'subtables' priority
 * vector must agree with the 'subtables_map' cmap, each subtable's priority
 * bookkeeping (max_priority, max_count) and ports trie must be correct, and
 * iteration must visit exactly the rules counted.  'n_tables', 'n_rules',
 * and 'n_dups' give the expected number of subtables, total rules, and
 * rules that share a priority list head; pass -1 to skip any of these
 * checks. */
static void
check_tables(const struct classifier *cls, int n_tables, int n_rules,
             int n_dups)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    const struct cls_subtable *table;
    struct test_rule *test_rule;
    int found_tables = 0;
    int found_rules = 0;
    int found_dups = 0;
    int found_rules2 = 0;

    pvector_verify(&cls->subtables);
    CMAP_FOR_EACH (table, cmap_node, &cls->subtables_map) {
        const struct cls_match *head;
        int max_priority = INT_MIN;
        unsigned int max_count = 0;
        bool found = false;
        const struct cls_subtable *iter;

        /* Locate the subtable from 'subtables'. */
        PVECTOR_FOR_EACH (iter, &cls->subtables) {
            if (iter == table) {
                if (found) {
                    ovs_abort(0, "Subtable %p duplicated in 'subtables'.",
                              table);
                }
                found = true;
            }
        }
        if (!found) {
            ovs_abort(0, "Subtable %p not found from 'subtables'.", table);
        }

        assert(!cmap_is_empty(&table->rules));
        /* With a ports mask in use, the ports trie must hold one entry per
         * rule; otherwise it must be empty. */
        assert(trie_verify(&table->ports_trie, 0, table->ports_mask_len)
               == (table->ports_mask_len ? cmap_count(&table->rules) : 0));

        found_tables++;
        CMAP_FOR_EACH (head, cmap_node, &table->rules) {
            int prev_priority = INT_MAX;
            const struct cls_match *rule;

            /* Track the subtable's highest priority and how many list
             * heads share it. */
            if (head->priority > max_priority) {
                max_priority = head->priority;
                max_count = 1;
            } else if (head->priority == max_priority) {
                ++max_count;
            }

            found_rules++;
            /* Rules behind a head share its match but must have strictly
             * decreasing priorities. */
            RCULIST_FOR_EACH (rule, list, &head->list) {
                assert(rule->priority < prev_priority);
                assert(rule->priority <= table->max_priority);

                prev_priority = rule->priority;
                found_rules++;
                found_dups++;
                assert(classifier_find_rule_exactly(cls, rule->cls_rule)
                       == rule->cls_rule);
            }
        }
        assert(table->max_priority == max_priority);
        assert(table->max_count == max_count);
    }

    assert(found_tables == cmap_count(&cls->subtables_map));
    assert(found_tables == pvector_count(&cls->subtables));
    assert(n_tables == -1 || n_tables == cmap_count(&cls->subtables_map));
    assert(n_rules == -1 || found_rules == n_rules);
    assert(n_dups == -1 || found_dups == n_dups);

    /* Public iteration must see exactly the rules counted above. */
    CLS_FOR_EACH (test_rule, cls_rule, cls) {
        found_rules2++;
    }
    assert(found_rules == found_rules2);
}
589
590 static struct test_rule *
591 make_rule(int wc_fields, int priority, int value_pat)
592 {
593 const struct cls_field *f;
594 struct test_rule *rule;
595 struct match match;
596
597 match_init_catchall(&match);
598 for (f = &cls_fields[0]; f < &cls_fields[CLS_N_FIELDS]; f++) {
599 int f_idx = f - cls_fields;
600 int value_idx = (value_pat & (1u << f_idx)) != 0;
601 memcpy((char *) &match.flow + f->ofs,
602 values[f_idx][value_idx], f->len);
603
604 if (f_idx == CLS_F_IDX_NW_SRC) {
605 match.wc.masks.nw_src = OVS_BE32_MAX;
606 } else if (f_idx == CLS_F_IDX_NW_DST) {
607 match.wc.masks.nw_dst = OVS_BE32_MAX;
608 } else if (f_idx == CLS_F_IDX_TP_SRC) {
609 match.wc.masks.tp_src = OVS_BE16_MAX;
610 } else if (f_idx == CLS_F_IDX_TP_DST) {
611 match.wc.masks.tp_dst = OVS_BE16_MAX;
612 } else if (f_idx == CLS_F_IDX_DL_SRC) {
613 memset(match.wc.masks.dl_src, 0xff, ETH_ADDR_LEN);
614 } else if (f_idx == CLS_F_IDX_DL_DST) {
615 memset(match.wc.masks.dl_dst, 0xff, ETH_ADDR_LEN);
616 } else if (f_idx == CLS_F_IDX_VLAN_TCI) {
617 match.wc.masks.vlan_tci = OVS_BE16_MAX;
618 } else if (f_idx == CLS_F_IDX_TUN_ID) {
619 match.wc.masks.tunnel.tun_id = OVS_BE64_MAX;
620 } else if (f_idx == CLS_F_IDX_METADATA) {
621 match.wc.masks.metadata = OVS_BE64_MAX;
622 } else if (f_idx == CLS_F_IDX_NW_DSCP) {
623 match.wc.masks.nw_tos |= IP_DSCP_MASK;
624 } else if (f_idx == CLS_F_IDX_NW_PROTO) {
625 match.wc.masks.nw_proto = UINT8_MAX;
626 } else if (f_idx == CLS_F_IDX_DL_TYPE) {
627 match.wc.masks.dl_type = OVS_BE16_MAX;
628 } else if (f_idx == CLS_F_IDX_IN_PORT) {
629 match.wc.masks.in_port.ofp_port = u16_to_ofp(UINT16_MAX);
630 } else {
631 OVS_NOT_REACHED();
632 }
633 }
634
635 rule = xzalloc(sizeof *rule);
636 cls_rule_init(&rule->cls_rule, &match, wc_fields
637 ? (priority == INT_MIN ? priority + 1 : priority)
638 : INT_MAX);
639 return rule;
640 }
641
642 static struct test_rule *
643 clone_rule(const struct test_rule *src)
644 {
645 struct test_rule *dst;
646
647 dst = xmalloc(sizeof *dst);
648 dst->aux = src->aux;
649 cls_rule_clone(&dst->cls_rule, &src->cls_rule);
650 return dst;
651 }
652
/* Destroys 'rule''s embedded cls_rule and frees 'rule'.  Used both directly
 * and as an ovsrcu_postpone() callback. */
static void
free_rule(struct test_rule *rule)
{
    cls_rule_destroy(&rule->cls_rule);
    free(rule);
}
659
/* Randomly permutes the 'n' ints in 'p' (Fisher-Yates shuffle). */
static void
shuffle(int *p, size_t n)
{
    size_t i;

    for (i = 0; n - i > 1; i++) {
        size_t j = i + random_range(n - i);
        int held = p[i];

        p[i] = p[j];
        p[j] = held;
    }
}
670
/* Randomly permutes the 'n' uint32_ts in 'p' (Fisher-Yates shuffle). */
static void
shuffle_u32s(uint32_t *p, size_t n)
{
    size_t i;

    for (i = 0; n - i > 1; i++) {
        size_t j = i + random_range(n - i);
        uint32_t held = p[i];

        p[i] = p[j];
        p[j] = held;
    }
}
681 \f
682 /* Classifier tests. */
683
684 static enum mf_field_id trie_fields[2] = {
685 MFF_IPV4_DST, MFF_IPV4_SRC
686 };
687
/* Installs the standard test prefix-lookup fields (IPv4 dst and src) into
 * 'cls', verifying trie invariants before and after the change. */
static void
set_prefix_fields(struct classifier *cls)
{
    verify_tries(cls);
    classifier_set_prefix_fields(cls, trie_fields, ARRAY_SIZE(trie_fields));
    verify_tries(cls);
}
695
/* Tests an empty classifier: both implementations must report empty and
 * agree on (empty) lookup results. */
static void
test_empty(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    struct classifier cls;
    struct tcls tcls;

    classifier_init(&cls, flow_segment_u64s);
    set_prefix_fields(&cls);
    tcls_init(&tcls);
    assert(classifier_is_empty(&cls));
    assert(tcls_is_empty(&tcls));
    compare_classifiers(&cls, &tcls);
    classifier_destroy(&cls);
    tcls_destroy(&tcls);
}
712
/* Destroys a null classifier: classifier_destroy(NULL) must be a no-op. */
static void
test_destroy_null(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    classifier_destroy(NULL);
}
719
/* Tests classification with one rule at a time, over every combination of
 * wildcarded fields. */
static void
test_single_rule(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    unsigned int wc_fields; /* Hilarious. */

    for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
        struct classifier cls;
        struct test_rule *rule, *tcls_rule;
        struct tcls tcls;

        /* Priority derived from 'wc_fields' so that it varies but is
         * reproducible. */
        rule = make_rule(wc_fields,
                         hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);

        classifier_init(&cls, flow_segment_u64s);
        set_prefix_fields(&cls);
        tcls_init(&tcls);

        tcls_rule = tcls_insert(&tcls, rule);
        classifier_insert(&cls, &rule->cls_rule, NULL, 0);
        compare_classifiers(&cls, &tcls);
        check_tables(&cls, 1, 1, 0);

        classifier_remove(&cls, &rule->cls_rule);
        tcls_remove(&tcls, tcls_rule);
        assert(classifier_is_empty(&cls));
        assert(tcls_is_empty(&tcls));
        compare_classifiers(&cls, &tcls);

        ovsrcu_postpone(free_rule, rule);
        classifier_destroy(&cls);
        tcls_destroy(&tcls);
    }
}
754
755 /* Tests replacing one rule by another. */
756 static void
757 test_rule_replacement(struct ovs_cmdl_context *ctx OVS_UNUSED)
758 {
759 unsigned int wc_fields;
760
761 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
762 struct classifier cls;
763 struct test_rule *rule1;
764 struct test_rule *rule2;
765 struct tcls tcls;
766
767 rule1 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
768 rule2 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
769 rule2->aux += 5;
770 rule2->aux += 5;
771
772 classifier_init(&cls, flow_segment_u64s);
773 set_prefix_fields(&cls);
774 tcls_init(&tcls);
775 tcls_insert(&tcls, rule1);
776 classifier_insert(&cls, &rule1->cls_rule, NULL, 0);
777 compare_classifiers(&cls, &tcls);
778 check_tables(&cls, 1, 1, 0);
779 tcls_destroy(&tcls);
780
781 tcls_init(&tcls);
782 tcls_insert(&tcls, rule2);
783
784 assert(test_rule_from_cls_rule(
785 classifier_replace(&cls, &rule2->cls_rule,
786 NULL, 0)) == rule1);
787 ovsrcu_postpone(free_rule, rule1);
788 compare_classifiers(&cls, &tcls);
789 check_tables(&cls, 1, 1, 0);
790 classifier_defer(&cls);
791 classifier_remove(&cls, &rule2->cls_rule);
792
793 tcls_destroy(&tcls);
794 destroy_classifier(&cls);
795 }
796 }
797
/* Returns n_items! (and 1 for any n_items < 2).  The result must fit in an
 * int; callers only use small arguments. */
static int
factorial(int n_items)
{
    return n_items < 2 ? 1 : n_items * factorial(n_items - 1);
}
809
/* Exchanges the values of '*a' and '*b'. */
static void
swap(int *a, int *b)
{
    int held = *b;

    *b = *a;
    *a = held;
}
817
/* Reverses the order of the 'n' elements of 'a' in place. */
static void
reverse(int *a, int n)
{
    int lo, hi;

    for (lo = 0, hi = n - 1; lo < hi; lo++, hi--) {
        int held = a[lo];

        a[lo] = a[hi];
        a[hi] = held;
    }
}
828
/* Advances 'a', an array of 'n' ints, to the lexicographically next
 * permutation.  Returns true on success, or false (leaving 'a' unchanged)
 * if 'a' was already the final, descending permutation. */
static bool
next_permutation(int *a, int n)
{
    int k;

    for (k = n - 2; k >= 0; k--) {
        if (a[k] < a[k + 1]) {
            int l, lo, hi, held;

            /* Rightmost element greater than the pivot a[k]; guaranteed to
             * exist because a[k] < a[k + 1]. */
            for (l = n - 1; a[l] <= a[k]; l--) {
                continue;
            }

            /* Exchange pivot and successor, then reverse the suffix. */
            held = a[k];
            a[k] = a[l];
            a[l] = held;
            for (lo = k + 1, hi = n - 1; lo < hi; lo++, hi--) {
                held = a[lo];
                a[lo] = a[hi];
                a[hi] = held;
            }
            return true;
        }
    }
    return false;
}
849
/* Tests classification with rules that have the same matching criteria.
 * Such rules land in the same subtable list and displace each other when
 * their priorities collide.  Exercises every order of inserting and
 * removing N_RULES rules (each rule index appears twice in 'ops': first
 * occurrence inserts, second removes). */
static void
test_many_rules_in_one_list (struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    enum { N_RULES = 3 };
    int n_pris;

    for (n_pris = N_RULES; n_pris >= 1; n_pris--) {
        int ops[N_RULES * 2];
        int pris[N_RULES];
        int n_permutations;
        int i;

        /* Assign 'n_pris' distinct priorities among the N_RULES rules. */
        pris[0] = 0;
        for (i = 1; i < N_RULES; i++) {
            pris[i] = pris[i - 1] + (n_pris > i);
        }

        for (i = 0; i < N_RULES * 2; i++) {
            ops[i] = i / 2;
        }

        n_permutations = 0;
        do {
            struct test_rule *rules[N_RULES];
            struct test_rule *tcls_rules[N_RULES];
            int pri_rules[N_RULES];   /* Rule index installed per priority,
                                       * or -1. */
            struct classifier cls;
            struct tcls tcls;

            n_permutations++;

            for (i = 0; i < N_RULES; i++) {
                rules[i] = make_rule(456, pris[i], 0);
                tcls_rules[i] = NULL;
                pri_rules[i] = -1;
            }

            classifier_init(&cls, flow_segment_u64s);
            set_prefix_fields(&cls);
            tcls_init(&tcls);

            for (i = 0; i < ARRAY_SIZE(ops); i++) {
                int j = ops[i];
                int m, n;

                if (!tcls_rules[j]) {
                    /* First occurrence of rule 'j': insert it.  If another
                     * rule with the same priority is installed, it must be
                     * displaced. */
                    struct test_rule *displaced_rule;

                    tcls_rules[j] = tcls_insert(&tcls, rules[j]);
                    displaced_rule = test_rule_from_cls_rule(
                        classifier_replace(&cls, &rules[j]->cls_rule,
                                           NULL, 0));
                    if (pri_rules[pris[j]] >= 0) {
                        int k = pri_rules[pris[j]];
                        assert(displaced_rule != NULL);
                        assert(displaced_rule != rules[j]);
                        assert(pris[j] == displaced_rule->cls_rule.priority);
                        tcls_rules[k] = NULL;
                    } else {
                        assert(displaced_rule == NULL);
                    }
                    pri_rules[pris[j]] = j;
                } else {
                    /* Second occurrence of rule 'j': remove it. */
                    classifier_remove(&cls, &rules[j]->cls_rule);
                    tcls_remove(&tcls, tcls_rules[j]);
                    tcls_rules[j] = NULL;
                    pri_rules[pris[j]] = -1;
                }
                compare_classifiers(&cls, &tcls);

                /* 'n' = number of rules currently installed. */
                n = 0;
                for (m = 0; m < N_RULES; m++) {
                    n += tcls_rules[m] != NULL;
                }
                check_tables(&cls, n > 0, n, n - 1);
            }

            classifier_defer(&cls);
            for (i = 0; i < N_RULES; i++) {
                if (classifier_remove(&cls, &rules[i]->cls_rule)) {
                    ovsrcu_postpone(free_rule, rules[i]);
                }
            }
            classifier_destroy(&cls);
            tcls_destroy(&tcls);
        } while (next_permutation(ops, ARRAY_SIZE(ops)));
        /* (2N)! permutations of 'ops', divided by 2 per rule because the
         * two copies of each rule index are interchangeable. */
        assert(n_permutations == (factorial(N_RULES * 2) >> N_RULES));
    }
}
940
/* Returns the number of 1-bits in 'x'. */
static int
count_ones(unsigned long int x)
{
    int n;

    for (n = 0; x; n++) {
        x &= x - 1;             /* Clears the rightmost 1-bit. */
    }

    return n;
}
953
/* Returns true if any of the first 'n' elements of 'array' equals
 * 'value'. */
static bool
array_contains(int *array, int n, int value)
{
    const int *end = array + n;
    const int *p;

    for (p = array; p < end; p++) {
        if (*p == value) {
            return true;
        }
    }
    return false;
}
967
/* Tests classification with two rules at a time that fall into the same
 * table but different lists. */
static void
test_many_rules_in_one_table(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    int iteration;

    for (iteration = 0; iteration < 50; iteration++) {
        enum { N_RULES = 20 };
        struct test_rule *rules[N_RULES];
        struct test_rule *tcls_rules[N_RULES];
        struct classifier cls;
        struct tcls tcls;
        int value_pats[N_RULES];
        int value_mask;
        int wcf;
        int i;

        /* Pick a wildcard bitmap 'wcf' that leaves enough remaining fields
         * ('value_mask') for N_RULES distinct value patterns. */
        do {
            wcf = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
            value_mask = ~wcf & ((1u << CLS_N_FIELDS) - 1);
        } while ((1 << count_ones(value_mask)) < N_RULES);

        classifier_init(&cls, flow_segment_u64s);
        set_prefix_fields(&cls);
        tcls_init(&tcls);

        for (i = 0; i < N_RULES; i++) {
            int priority = random_range(INT_MAX);

            /* Choose a value pattern not used by an earlier rule, so that
             * every rule is distinct. */
            do {
                value_pats[i] = random_uint32() & value_mask;
            } while (array_contains(value_pats, i, value_pats[i]));

            rules[i] = make_rule(wcf, priority, value_pats[i]);
            tcls_rules[i] = tcls_insert(&tcls, rules[i]);

            classifier_insert(&cls, &rules[i]->cls_rule, NULL, 0);
            compare_classifiers(&cls, &tcls);

            /* Same 'wcf' for every rule, so always exactly one table. */
            check_tables(&cls, 1, i + 1, 0);
        }

        for (i = 0; i < N_RULES; i++) {
            tcls_remove(&tcls, tcls_rules[i]);
            classifier_remove(&cls, &rules[i]->cls_rule);
            compare_classifiers(&cls, &tcls);
            ovsrcu_postpone(free_rule, rules[i]);

            check_tables(&cls, i < N_RULES - 1, N_RULES - (i + 1), 0);
        }

        classifier_destroy(&cls);
        tcls_destroy(&tcls);
    }
}
1024
/* Tests classification with many rules at a time that fall into random lists
 * in 'n' tables (one table per distinct wildcard bitmap in 'wcfs'). */
static void
test_many_rules_in_n_tables(int n_tables)
{
    enum { MAX_RULES = 50 };
    int wcfs[10];
    int iteration;
    int i;

    /* Choose 'n_tables' distinct wildcard bitmaps. */
    assert(n_tables < 10);
    for (i = 0; i < n_tables; i++) {
        do {
            wcfs[i] = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
        } while (array_contains(wcfs, i, wcfs[i]));
    }

    for (iteration = 0; iteration < 30; iteration++) {
        int priorities[MAX_RULES];
        struct classifier cls;
        struct tcls tcls;

        /* Reseed per iteration for reproducibility. */
        random_set_seed(iteration + 1);
        for (i = 0; i < MAX_RULES; i++) {
            priorities[i] = (i * 129) & INT_MAX;
        }
        shuffle(priorities, ARRAY_SIZE(priorities));

        classifier_init(&cls, flow_segment_u64s);
        set_prefix_fields(&cls);
        tcls_init(&tcls);

        for (i = 0; i < MAX_RULES; i++) {
            struct test_rule *rule;
            int priority = priorities[i];
            int wcf = wcfs[random_range(n_tables)];
            int value_pat = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
            rule = make_rule(wcf, priority, value_pat);
            tcls_insert(&tcls, rule);
            classifier_insert(&cls, &rule->cls_rule, NULL, 0);
            compare_classifiers(&cls, &tcls);
            check_tables(&cls, -1, i + 1, -1);
        }

        /* Drain the classifier by repeatedly deleting everything that
         * matches a randomly chosen installed rule. */
        while (!classifier_is_empty(&cls)) {
            struct test_rule *target;
            struct test_rule *rule;

            target = clone_rule(tcls.rules[random_range(tcls.n_rules)]);

            CLS_FOR_EACH_TARGET (rule, cls_rule, &cls, &target->cls_rule) {
                if (classifier_remove(&cls, &rule->cls_rule)) {
                    ovsrcu_postpone(free_rule, rule);
                }
            }

            tcls_delete_matches(&tcls, &target->cls_rule);
            compare_classifiers(&cls, &tcls);
            check_tables(&cls, -1, -1, -1);
            free_rule(target);
        }

        destroy_classifier(&cls);
        tcls_destroy(&tcls);
    }
}
1091
/* Tests many rules spread across random lists in two tables. */
static void
test_many_rules_in_two_tables(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    test_many_rules_in_n_tables(2);
}
1097
/* Tests many rules spread across random lists in five tables. */
static void
test_many_rules_in_five_tables(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    test_many_rules_in_n_tables(5);
}
1103 \f
1104 /* Miniflow tests. */
1105
/* Returns one member, chosen at random, of a fixed set of "interesting"
 * 32-bit values. */
static uint32_t
random_value(void)
{
    static const uint32_t values[] =
        { 0xffffffff, 0xaaaaaaaa, 0x55555555, 0x80000000,
          0x00000001, 0xface0000, 0x00d00d1e, 0xdeadbeef };
    size_t choice = random_range(ARRAY_SIZE(values));

    return values[choice];
}
1115
/* Case-enumeration helper: if '*idxp' selects one of the next 'n' cases,
 * returns true leaving '*idxp' as the selected case number; otherwise
 * consumes those 'n' cases by subtracting them from '*idxp' and returns
 * false. */
static bool
choose(unsigned int n, unsigned int *idxp)
{
    if (*idxp >= n) {
        *idxp -= n;
        return false;
    }
    return true;
}
1126
1127 #define FLOW_U32S (FLOW_U64S * 2)
1128
1129 static bool
1130 init_consecutive_values(int n_consecutive, struct flow *flow,
1131 unsigned int *idxp)
1132 {
1133 uint32_t *flow_u32 = (uint32_t *) flow;
1134
1135 if (choose(FLOW_U32S - n_consecutive + 1, idxp)) {
1136 int i;
1137
1138 for (i = 0; i < n_consecutive; i++) {
1139 flow_u32[*idxp + i] = random_value();
1140 }
1141 return true;
1142 } else {
1143 return false;
1144 }
1145 }
1146
/* Deterministically enumerates a set of "interesting" flows.  Successive
 * values of 'idx' (0, 1, 2, ...) select, in order: the empty flow; flows
 * with 1..4 consecutive nonzero 32-bit words; flows with
 * FLOW_U32S-4..FLOW_U32S consecutive nonzero words; flows with exactly two
 * nonconsecutive nonzero words; and 16 flows with >= 3 nonzero words in
 * randomly shuffled positions.
 *
 * On success stores the chosen flow in '*flow' and returns true; returns
 * false once 'idx' is past the end of the enumeration.  The nonzero word
 * *values* come from random_value(), so they depend on the PRNG seed even
 * though (except for the final generator) the nonzero *positions* are fixed
 * by 'idx'.  Each choose()/init_consecutive_values() call consumes some of
 * 'idx'; whichever generator 'idx' lands in produces the flow. */
static bool
next_random_flow(struct flow *flow, unsigned int idx)
{
    uint32_t *flow_u32 = (uint32_t *) flow;
    int i;

    memset(flow, 0, sizeof *flow);

    /* Empty flow. */
    if (choose(1, &idx)) {
        return true;
    }

    /* All flows with a small number of consecutive nonzero values. */
    for (i = 1; i <= 4; i++) {
        if (init_consecutive_values(i, flow, &idx)) {
            return true;
        }
    }

    /* All flows with a large number of consecutive nonzero values. */
    for (i = FLOW_U32S - 4; i <= FLOW_U32S; i++) {
        if (init_consecutive_values(i, flow, &idx)) {
            return true;
        }
    }

    /* All flows with exactly two nonconsecutive nonzero values.
     * (FLOW_U32S - 1) * (FLOW_U32S - 2) / 2 is the number of (ofs1, ofs2)
     * pairs with ofs2 >= ofs1 + 2, matching the nested loops below. */
    if (choose((FLOW_U32S - 1) * (FLOW_U32S - 2) / 2, &idx)) {
        int ofs1;

        for (ofs1 = 0; ofs1 < FLOW_U32S - 2; ofs1++) {
            int ofs2;

            for (ofs2 = ofs1 + 2; ofs2 < FLOW_U32S; ofs2++) {
                if (choose(1, &idx)) {
                    flow_u32[ofs1] = random_value();
                    flow_u32[ofs2] = random_value();
                    return true;
                }
            }
        }
        /* The pair count above exactly covers the nested loops, so 'idx'
         * must have been consumed before falling out of them. */
        OVS_NOT_REACHED();
    }

    /* 16 randomly chosen flows with N >= 3 nonzero values. */
    if (choose(16 * (FLOW_U32S - 4), &idx)) {
        int n = idx / 16 + 3;    /* 16 enumeration slots per value of n. */
        int i;

        for (i = 0; i < n; i++) {
            flow_u32[i] = random_value();
        }
        shuffle_u32s(flow_u32, FLOW_U32S);

        return true;
    }

    return false;
}
1207
/* Overwrites '*flow' with a flow picked uniformly at random from the full
 * enumeration produced by next_random_flow().  The enumeration's size is
 * discovered on the first call (by walking the enumeration to its end) and
 * cached in 'max' for subsequent calls. */
static void
any_random_flow(struct flow *flow)
{
    static unsigned int max;

    if (!max) {
        unsigned int limit = 0;

        /* 'flow' is clobbered while counting, then overwritten below. */
        while (next_random_flow(flow, limit)) {
            limit++;
        }
        max = limit;
    }

    next_random_flow(flow, random_range(max));
}
1220
1221 static void
1222 toggle_masked_flow_bits(struct flow *flow, const struct flow_wildcards *mask)
1223 {
1224 const uint32_t *mask_u32 = (const uint32_t *) &mask->masks;
1225 uint32_t *flow_u32 = (uint32_t *) flow;
1226 int i;
1227
1228 for (i = 0; i < FLOW_U32S; i++) {
1229 if (mask_u32[i] != 0) {
1230 uint32_t bit;
1231
1232 do {
1233 bit = 1u << random_range(32);
1234 } while (!(bit & mask_u32[i]));
1235 flow_u32[i] ^= bit;
1236 }
1237 }
1238 }
1239
1240 static void
1241 wildcard_extra_bits(struct flow_wildcards *mask)
1242 {
1243 uint32_t *mask_u32 = (uint32_t *) &mask->masks;
1244 int i;
1245
1246 for (i = 0; i < FLOW_U32S; i++) {
1247 if (mask_u32[i] != 0) {
1248 uint32_t bit;
1249
1250 do {
1251 bit = 1u << random_range(32);
1252 } while (!(bit & mask_u32[i]));
1253 mask_u32[i] &= ~bit;
1254 }
1255 }
1256 }
1257
/* "miniflow" command handler: round-trip and equivalence tests for struct
 * miniflow.  Iterates over the deterministic flow enumeration from
 * next_random_flow() and, for each flow, checks flow<->miniflow conversion,
 * cloning, hashing, and masked comparisons against the equivalent struct
 * flow operations. */
static void
test_miniflow(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    struct flow flow;
    unsigned int idx;

    random_set_seed(0xb3faca38);    /* Fixed seed for reproducibility. */
    for (idx = 0; next_random_flow(&flow, idx); idx++) {
        const uint64_t *flow_u64 = (const uint64_t *) &flow;
        struct miniflow miniflow, miniflow2, miniflow3;
        struct flow flow2, flow3;
        struct flow_wildcards mask;
        struct minimask minimask;
        int i;

        /* Convert flow to miniflow. */
        miniflow_init(&miniflow, &flow);

        /* Check that the flow equals its miniflow (VLAN id and every
         * 64-bit word). */
        assert(miniflow_get_vid(&miniflow) == vlan_tci_to_vid(flow.vlan_tci));
        for (i = 0; i < FLOW_U64S; i++) {
            assert(miniflow_get(&miniflow, i) == flow_u64[i]);
        }

        /* Check that the miniflow equals itself. */
        assert(miniflow_equal(&miniflow, &miniflow));

        /* Convert miniflow back to flow and verify that it's the same. */
        miniflow_expand(&miniflow, &flow2);
        assert(flow_equal(&flow, &flow2));

        /* Check that copying a miniflow works properly: the clone must
         * compare and hash equal and expand back to the original flow. */
        miniflow_clone(&miniflow2, &miniflow);
        assert(miniflow_equal(&miniflow, &miniflow2));
        assert(miniflow_hash(&miniflow, 0) == miniflow_hash(&miniflow2, 0));
        miniflow_expand(&miniflow2, &flow3);
        assert(flow_equal(&flow, &flow3));

        /* Check that masked matches work as expected for identical flows and
         * miniflows.  Generator index 1 fills one word of 'mask.masks' with
         * a random nonzero pattern; the loop guards against a catch-all mask
         * just in case. */
        do {
            next_random_flow(&mask.masks, 1);
        } while (flow_wildcards_is_catchall(&mask));
        minimask_init(&minimask, &mask);
        assert(minimask_is_catchall(&minimask)
               == flow_wildcards_is_catchall(&mask));
        assert(miniflow_equal_in_minimask(&miniflow, &miniflow2, &minimask));
        assert(miniflow_equal_flow_in_minimask(&miniflow, &flow2, &minimask));
        assert(miniflow_hash_in_minimask(&miniflow, &minimask, 0x12345678) ==
               flow_hash_in_minimask(&flow, &minimask, 0x12345678));

        /* Check that masked matches work as expected for differing flows and
         * miniflows: flipping a masked bit must break equality. */
        toggle_masked_flow_bits(&flow2, &mask);
        assert(!miniflow_equal_flow_in_minimask(&miniflow, &flow2, &minimask));
        miniflow_init(&miniflow3, &flow2);
        assert(!miniflow_equal_in_minimask(&miniflow, &miniflow3, &minimask));

        /* Clean up. */
        miniflow_destroy(&miniflow);
        miniflow_destroy(&miniflow2);
        miniflow_destroy(&miniflow3);
        minimask_destroy(&minimask);
    }
}
1323
1324 static void
1325 test_minimask_has_extra(struct ovs_cmdl_context *ctx OVS_UNUSED)
1326 {
1327 struct flow_wildcards catchall;
1328 struct minimask minicatchall;
1329 struct flow flow;
1330 unsigned int idx;
1331
1332 flow_wildcards_init_catchall(&catchall);
1333 minimask_init(&minicatchall, &catchall);
1334 assert(minimask_is_catchall(&minicatchall));
1335
1336 random_set_seed(0x2ec7905b);
1337 for (idx = 0; next_random_flow(&flow, idx); idx++) {
1338 struct flow_wildcards mask;
1339 struct minimask minimask;
1340
1341 mask.masks = flow;
1342 minimask_init(&minimask, &mask);
1343 assert(!minimask_has_extra(&minimask, &minimask));
1344 assert(minimask_has_extra(&minicatchall, &minimask)
1345 == !minimask_is_catchall(&minimask));
1346 if (!minimask_is_catchall(&minimask)) {
1347 struct minimask minimask2;
1348
1349 wildcard_extra_bits(&mask);
1350 minimask_init(&minimask2, &mask);
1351 assert(minimask_has_extra(&minimask2, &minimask));
1352 assert(!minimask_has_extra(&minimask, &minimask2));
1353 minimask_destroy(&minimask2);
1354 }
1355
1356 minimask_destroy(&minimask);
1357 }
1358
1359 minimask_destroy(&minicatchall);
1360 }
1361
/* "minimask_combine" command handler: tests minimask_combine() by checking
 * that (a) combining any minimask with the catch-all minimask yields a
 * catch-all result, and (b) combining two random minimasks agrees with
 * flow_wildcards_and() on the uncompressed masks. */
static void
test_minimask_combine(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    struct flow_wildcards catchall;
    struct minimask minicatchall;
    struct flow flow;
    unsigned int idx;

    flow_wildcards_init_catchall(&catchall);
    minimask_init(&minicatchall, &catchall);
    assert(minimask_is_catchall(&minicatchall));

    random_set_seed(0x181bf0cd);    /* Fixed seed for reproducibility. */
    for (idx = 0; next_random_flow(&flow, idx); idx++) {
        struct minimask minimask, minimask2, minicombined;
        struct flow_wildcards mask, mask2, combined, combined2;
        /* Scratch space handed to minimask_combine(); presumably backs
         * 'minicombined', which is why it is never destroyed -- confirm
         * against minimask_combine()'s contract. */
        uint64_t storage[FLOW_U64S];
        struct flow flow2;

        /* Reinterpret the enumerated flow's bit pattern as a mask. */
        mask.masks = flow;
        minimask_init(&minimask, &mask);

        /* Combining with the catch-all mask gives a catch-all mask. */
        minimask_combine(&minicombined, &minimask, &minicatchall, storage);
        assert(minimask_is_catchall(&minicombined));

        any_random_flow(&flow2);
        mask2.masks = flow2;
        minimask_init(&minimask2, &mask2);

        /* minimask_combine() must agree with flow_wildcards_and() on the
         * expanded representation. */
        minimask_combine(&minicombined, &minimask, &minimask2, storage);
        flow_wildcards_and(&combined, &mask, &mask2);
        minimask_expand(&minicombined, &combined2);
        assert(flow_wildcards_equal(&combined, &combined2));

        minimask_destroy(&minimask);
        minimask_destroy(&minimask2);
    }

    minimask_destroy(&minicatchall);
}
1402 \f
/* Subcommand table for ovs_cmdl_run_command(): maps the name given on the
 * test-classifier command line to its handler.  The NULL/0/0 fields are,
 * presumably, the usage string and min/max argument counts (none of these
 * commands take arguments -- confirm against struct ovs_cmdl_command).
 * The all-NULL entry terminates the table. */
static const struct ovs_cmdl_command commands[] = {
    /* Classifier tests. */
    {"empty", NULL, 0, 0, test_empty},
    {"destroy-null", NULL, 0, 0, test_destroy_null},
    {"single-rule", NULL, 0, 0, test_single_rule},
    {"rule-replacement", NULL, 0, 0, test_rule_replacement},
    {"many-rules-in-one-list", NULL, 0, 0, test_many_rules_in_one_list},
    {"many-rules-in-one-table", NULL, 0, 0, test_many_rules_in_one_table},
    {"many-rules-in-two-tables", NULL, 0, 0, test_many_rules_in_two_tables},
    {"many-rules-in-five-tables", NULL, 0, 0, test_many_rules_in_five_tables},

    /* Miniflow and minimask tests. */
    {"miniflow", NULL, 0, 0, test_miniflow},
    {"minimask_has_extra", NULL, 0, 0, test_minimask_has_extra},
    {"minimask_combine", NULL, 0, 0, test_minimask_combine},

    {NULL, NULL, 0, 0, NULL},
};
1421
1422 static void
1423 test_classifier_main(int argc, char *argv[])
1424 {
1425 struct ovs_cmdl_context ctx = {
1426 .argc = argc - 1,
1427 .argv = argv + 1,
1428 };
1429 set_program_name(argv[0]);
1430 init_values();
1431 ovs_cmdl_run_command(&ctx, commands);
1432 }
1433
1434 OVSTEST_REGISTER("test-classifier", test_classifier_main);