1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 */
4
5 #include <rte_acl.h>
6 #include "tb_mem.h"
7 #include "acl.h"
8
9 #define ACL_POOL_ALIGN 8
10 #define ACL_POOL_ALLOC_MIN 0x800000
11
12 /* number of pointers per alloc */
13 #define ACL_PTR_ALLOC 32
14
15 /* node-count heuristics for splitting rule sets across tries */
16 #define NODE_MAX 0x4000
17 #define NODE_MIN 0x800
18
19 /* TALLY values are per-field statistics */
20 enum {
21 TALLY_0 = 0, /* number of rules that are 0% or more wild. */
22 TALLY_25, /* number of rules that are 25% or more wild. */
23 TALLY_50,
24 TALLY_75,
25 TALLY_100,
26 TALLY_DEACTIVATED, /* deactivated fields (100% wild in all rules). */
27 TALLY_DEPTH,
28 /* number of rules that are 100% wild for this field and higher. */
29 TALLY_NUM
30 };
31
32 static const uint32_t wild_limits[TALLY_DEACTIVATED] = {0, 25, 50, 75, 100};
33
34 enum {
35 ACL_INTERSECT_NONE = 0,
36 ACL_INTERSECT_A = 1, /* set A is a superset of the A/B intersection */
37 ACL_INTERSECT_B = 2, /* set B is a superset of the A/B intersection */
38 ACL_INTERSECT = 4, /* sets A and B intersect */
39 };
40
41 enum {
42 ACL_PRIORITY_EQUAL = 0,
43 ACL_PRIORITY_NODE_A = 1,
44 ACL_PRIORITY_NODE_B = 2,
45 ACL_PRIORITY_MIXED = 3
46 };
47
48
49 struct acl_mem_block {
50 uint32_t block_size;
51 void *mem_ptr;
52 };
53
54 #define MEM_BLOCK_NUM 16
55
56 /* Single ACL rule, build-time representation. */
57 struct rte_acl_build_rule {
58 struct rte_acl_build_rule *next;
59 struct rte_acl_config *config;
60 /**< configuration for each field in the rule. */
61 const struct rte_acl_rule *f;
62 uint32_t *wildness;
63 };
64
65 /* Context for build phase */
66 struct acl_build_context {
67 const struct rte_acl_ctx *acx;
68 struct rte_acl_build_rule *build_rules;
69 struct rte_acl_config cfg;
70 int32_t node_max;
71 int32_t cur_node_max;
72 uint32_t node;
73 uint32_t num_nodes;
74 uint32_t category_mask;
75 uint32_t num_rules;
76 uint32_t node_id;
77 uint32_t src_mask;
78 uint32_t num_build_rules;
79 uint32_t num_tries;
80 struct tb_mem_pool pool;
81 struct rte_acl_trie tries[RTE_ACL_MAX_TRIES];
82 struct rte_acl_bld_trie bld_tries[RTE_ACL_MAX_TRIES];
83 uint32_t data_indexes[RTE_ACL_MAX_TRIES][RTE_ACL_MAX_FIELDS];
84
85 /* memory free lists for nodes and blocks used for node ptrs */
86 struct acl_mem_block blocks[MEM_BLOCK_NUM];
87 struct rte_acl_node *node_free_list;
88 };
89
90 static int acl_merge_trie(struct acl_build_context *context,
91 struct rte_acl_node *node_a, struct rte_acl_node *node_b,
92 uint32_t level, struct rte_acl_node **node_c);
93
94 static void
95 acl_deref_ptr(struct acl_build_context *context,
96 struct rte_acl_node *node, int index);
97
98 static void *
99 acl_build_alloc(struct acl_build_context *context, size_t n, size_t s)
100 {
101 uint32_t m;
102 void *p;
103 size_t alloc_size = n * s;
104
105 /*
106 * look for memory in free lists
107 */
108 for (m = 0; m < RTE_DIM(context->blocks); m++) {
109 if (context->blocks[m].block_size ==
110 alloc_size && context->blocks[m].mem_ptr != NULL) {
111 p = context->blocks[m].mem_ptr;
112 context->blocks[m].mem_ptr = *((void **)p);
113 memset(p, 0, alloc_size);
114 return p;
115 }
116 }
117
118 /*
119 * return allocation from memory pool
120 */
121 p = tb_alloc(&context->pool, alloc_size);
122 return p;
123 }
124
125 /*
126 * Free memory blocks (kept in context for reuse).
127 */
128 static void
129 acl_build_free(struct acl_build_context *context, size_t s, void *p)
130 {
131 uint32_t n;
132
133 for (n = 0; n < RTE_DIM(context->blocks); n++) {
134 if (context->blocks[n].block_size == s) {
135 *((void **)p) = context->blocks[n].mem_ptr;
136 context->blocks[n].mem_ptr = p;
137 return;
138 }
139 }
140 for (n = 0; n < RTE_DIM(context->blocks); n++) {
141 if (context->blocks[n].block_size == 0) {
142 context->blocks[n].block_size = s;
143 *((void **)p) = NULL;
144 context->blocks[n].mem_ptr = p;
145 return;
146 }
147 }
148 }
149
150 /*
151 * Allocate and initialize a new node.
152 */
153 static struct rte_acl_node *
154 acl_alloc_node(struct acl_build_context *context, int level)
155 {
156 struct rte_acl_node *node;
157
158 if (context->node_free_list != NULL) {
159 node = context->node_free_list;
160 context->node_free_list = node->next;
161 memset(node, 0, sizeof(struct rte_acl_node));
162 } else {
163 node = acl_build_alloc(context, sizeof(struct rte_acl_node), 1);
164 }
165
166 if (node != NULL) {
167 node->num_ptrs = 0;
168 node->level = level;
169 node->node_type = RTE_ACL_NODE_UNDEFINED;
170 node->node_index = RTE_ACL_NODE_UNDEFINED;
171 context->num_nodes++;
172 node->id = context->node_id++;
173 }
174 return node;
175 }
176
177 /*
178 * Free a node: dereference all nodes it points to, release its memory and put it on the free list
179 */
180 static void
181 acl_free_node(struct acl_build_context *context,
182 struct rte_acl_node *node)
183 {
184 uint32_t n;
185
186 if (node->prev != NULL)
187 node->prev->next = NULL;
188 for (n = 0; n < node->num_ptrs; n++)
189 acl_deref_ptr(context, node, n);
190
191 /* free mrt if this is a match node */
192 if (node->mrt != NULL) {
193 acl_build_free(context, sizeof(struct rte_acl_match_results),
194 node->mrt);
195 node->mrt = NULL;
196 }
197
198 /* free transitions to other nodes */
199 if (node->ptrs != NULL) {
200 acl_build_free(context,
201 node->max_ptrs * sizeof(struct rte_acl_ptr_set),
202 node->ptrs);
203 node->ptrs = NULL;
204 }
205
206 /* put it on the free list */
207 context->num_nodes--;
208 node->next = context->node_free_list;
209 context->node_free_list = node;
210 }
211
212
213 /*
214 * Include src bitset in dst bitset
215 */
216 static void
217 acl_include(struct rte_acl_bitset *dst, struct rte_acl_bitset *src, bits_t mask)
218 {
219 uint32_t n;
220
221 for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
222 dst->bits[n] = (dst->bits[n] & mask) | src->bits[n];
223 }
224
225 /*
226 * Set dst to bits of src1 that are not in src2
227 */
228 static int
229 acl_exclude(struct rte_acl_bitset *dst,
230 struct rte_acl_bitset *src1,
231 struct rte_acl_bitset *src2)
232 {
233 uint32_t n;
234 bits_t all_bits = 0;
235
236 for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
237 dst->bits[n] = src1->bits[n] & ~src2->bits[n];
238 all_bits |= dst->bits[n];
239 }
240 return all_bits != 0;
241 }
242
243 /*
244 * Add a pointer (ptr) to a node.
245 */
246 static int
247 acl_add_ptr(struct acl_build_context *context,
248 struct rte_acl_node *node,
249 struct rte_acl_node *ptr,
250 struct rte_acl_bitset *bits)
251 {
252 uint32_t n, num_ptrs;
253 struct rte_acl_ptr_set *ptrs = NULL;
254
255 /*
256 * If there's already a pointer to the same node, just add to the bitset
257 */
258 for (n = 0; n < node->num_ptrs; n++) {
259 if (node->ptrs[n].ptr != NULL) {
260 if (node->ptrs[n].ptr == ptr) {
261 acl_include(&node->ptrs[n].values, bits, -1);
262 acl_include(&node->values, bits, -1);
263 return 0;
264 }
265 }
266 }
267
268 /* if there's no room for another pointer, make room */
269 if (node->num_ptrs >= node->max_ptrs) {
270 /* add room for more pointers */
271 num_ptrs = node->max_ptrs + ACL_PTR_ALLOC;
272 ptrs = acl_build_alloc(context, num_ptrs, sizeof(*ptrs));
273
274 /* copy current pointers to the new memory allocation */
275 if (node->ptrs != NULL) {
276 memcpy(ptrs, node->ptrs,
277 node->num_ptrs * sizeof(*ptrs));
278 acl_build_free(context, node->max_ptrs * sizeof(*ptrs),
279 node->ptrs);
280 }
281 node->ptrs = ptrs;
282 node->max_ptrs = num_ptrs;
283 }
284
285 /* Find available ptr and add a new pointer to this node */
286 for (n = node->min_add; n < node->max_ptrs; n++) {
287 if (node->ptrs[n].ptr == NULL) {
288 node->ptrs[n].ptr = ptr;
289 acl_include(&node->ptrs[n].values, bits, 0);
290 acl_include(&node->values, bits, -1);
291 if (ptr != NULL)
292 ptr->ref_count++;
293 if (node->num_ptrs <= n)
294 node->num_ptrs = n + 1;
295 return 0;
296 }
297 }
298
299 return 0;
300 }
301
302 /*
303 * Add a pointer for a range of values
304 */
305 static int
306 acl_add_ptr_range(struct acl_build_context *context,
307 struct rte_acl_node *root,
308 struct rte_acl_node *node,
309 uint8_t low,
310 uint8_t high)
311 {
312 uint32_t n;
313 struct rte_acl_bitset bitset;
314
315 /* clear the bitset values */
316 for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
317 bitset.bits[n] = 0;
318
319 /* for each bit in range, add bit to set */
320 for (n = 0; n < UINT8_MAX + 1; n++)
321 if (n >= low && n <= high)
322 bitset.bits[n / (sizeof(bits_t) * 8)] |=
323 1 << (n % (sizeof(bits_t) * 8));
324
325 return acl_add_ptr(context, root, node, &bitset);
326 }
327
328 /*
329 * Generate a bitset from a byte value and mask.
330 */
331 static int
332 acl_gen_mask(struct rte_acl_bitset *bitset, uint32_t value, uint32_t mask)
333 {
334 int range = 0;
335 uint32_t n;
336
337 /* clear the bitset values */
338 for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++)
339 bitset->bits[n] = 0;
340
341 /* for each bit in value/mask, add bit to set */
342 for (n = 0; n < UINT8_MAX + 1; n++) {
343 if ((n & mask) == value) {
344 range++;
345 bitset->bits[n / (sizeof(bits_t) * 8)] |=
346 1 << (n % (sizeof(bits_t) * 8));
347 }
348 }
349 return range;
350 }
351
352 /*
353 * Determine how A and B intersect.
354 * Determine if A and/or B are supersets of the intersection.
355 */
356 static int
357 acl_intersect_type(const struct rte_acl_bitset *a_bits,
358 const struct rte_acl_bitset *b_bits,
359 struct rte_acl_bitset *intersect)
360 {
361 uint32_t n;
362 bits_t intersect_bits = 0;
363 bits_t a_superset = 0;
364 bits_t b_superset = 0;
365
366 /*
367 * calculate and store intersection and check if A and/or B have
368 * bits outside the intersection (superset)
369 */
370 for (n = 0; n < RTE_ACL_BIT_SET_SIZE; n++) {
371 intersect->bits[n] = a_bits->bits[n] & b_bits->bits[n];
372 a_superset |= a_bits->bits[n] ^ intersect->bits[n];
373 b_superset |= b_bits->bits[n] ^ intersect->bits[n];
374 intersect_bits |= intersect->bits[n];
375 }
376
377 n = (intersect_bits == 0 ? ACL_INTERSECT_NONE : ACL_INTERSECT) |
378 (b_superset == 0 ? 0 : ACL_INTERSECT_B) |
379 (a_superset == 0 ? 0 : ACL_INTERSECT_A);
380
381 return n;
382 }
383
384 /*
385 * Duplicate a node
386 */
387 static struct rte_acl_node *
388 acl_dup_node(struct acl_build_context *context, struct rte_acl_node *node)
389 {
390 uint32_t n;
391 struct rte_acl_node *next;
392
393 next = acl_alloc_node(context, node->level);
394
395 /* allocate the pointers */
396 if (node->num_ptrs > 0) {
397 next->ptrs = acl_build_alloc(context,
398 node->max_ptrs,
399 sizeof(struct rte_acl_ptr_set));
400 next->max_ptrs = node->max_ptrs;
401 }
402
403 /* copy over the pointers */
404 for (n = 0; n < node->num_ptrs; n++) {
405 if (node->ptrs[n].ptr != NULL) {
406 next->ptrs[n].ptr = node->ptrs[n].ptr;
407 next->ptrs[n].ptr->ref_count++;
408 acl_include(&next->ptrs[n].values,
409 &node->ptrs[n].values, -1);
410 }
411 }
412
413 next->num_ptrs = node->num_ptrs;
414
415 /* copy over node's match results */
416 if (node->match_flag == 0)
417 next->match_flag = 0;
418 else {
419 next->match_flag = -1;
420 next->mrt = acl_build_alloc(context, 1, sizeof(*next->mrt));
421 memcpy(next->mrt, node->mrt, sizeof(*next->mrt));
422 }
423
424 /* copy over node's bitset */
425 acl_include(&next->values, &node->values, -1);
426
427 node->next = next;
428 next->prev = node;
429
430 return next;
431 }
432
433 /*
434 * Dereference a pointer from a node
435 */
436 static void
437 acl_deref_ptr(struct acl_build_context *context,
438 struct rte_acl_node *node, int index)
439 {
440 struct rte_acl_node *ref_node;
441
442 /* De-reference the node at the specified pointer */
443 if (node != NULL && node->ptrs[index].ptr != NULL) {
444 ref_node = node->ptrs[index].ptr;
445 ref_node->ref_count--;
446 if (ref_node->ref_count == 0)
447 acl_free_node(context, ref_node);
448 }
449 }
450
451 /*
452 * Exclude b_bits from the bitset of src's pointer; if any bits remain, copy the pointer to dst
453 */
454 static int
455 acl_copy_ptr(struct acl_build_context *context,
456 struct rte_acl_node *dst,
457 struct rte_acl_node *src,
458 int index,
459 struct rte_acl_bitset *b_bits)
460 {
461 int rc;
462 struct rte_acl_bitset bits;
463
464 if (b_bits != NULL)
465 if (!acl_exclude(&bits, &src->ptrs[index].values, b_bits))
466 return 0;
467
468 rc = acl_add_ptr(context, dst, src->ptrs[index].ptr, &bits);
469 if (rc < 0)
470 return rc;
471 return 1;
472 }
473
474 /*
475 * Fill in gaps in ptrs list with the ptr at the end of the list
476 */
477 static void
478 acl_compact_node_ptrs(struct rte_acl_node *node_a)
479 {
480 uint32_t n;
481 int min_add = node_a->min_add;
482
483 while (node_a->num_ptrs > 0 &&
484 node_a->ptrs[node_a->num_ptrs - 1].ptr == NULL)
485 node_a->num_ptrs--;
486
487 for (n = min_add; n + 1 < node_a->num_ptrs; n++) {
488
489 /* if this entry is empty */
490 if (node_a->ptrs[n].ptr == NULL) {
491
492 /* move the last pointer to this entry */
493 acl_include(&node_a->ptrs[n].values,
494 &node_a->ptrs[node_a->num_ptrs - 1].values,
495 0);
496 node_a->ptrs[n].ptr =
497 node_a->ptrs[node_a->num_ptrs - 1].ptr;
498
499 /*
500 * mark the end as empty and adjust the number
501 * of used pointer entries
502 */
503 node_a->ptrs[node_a->num_ptrs - 1].ptr = NULL;
504 while (node_a->num_ptrs > 0 &&
505 node_a->ptrs[node_a->num_ptrs - 1].ptr == NULL)
506 node_a->num_ptrs--;
507 }
508 }
509 }
510
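/*
 * Resolve two match (leaf) nodes into one: for each category keep the
 * higher-priority result.  node_a or node_b is reused when one of them
 * wins (or ties) every category; otherwise a duplicate of node_a is
 * patched with node_b's better entries.
 */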
511 static int
512 acl_resolve_leaf(struct acl_build_context *context,
513 struct rte_acl_node *node_a,
514 struct rte_acl_node *node_b,
515 struct rte_acl_node **node_c)
516 {
517 uint32_t n;
518 int combined_priority = ACL_PRIORITY_EQUAL;
519
520 for (n = 0; n < context->cfg.num_categories; n++) {
521 if (node_a->mrt->priority[n] != node_b->mrt->priority[n]) {
522 combined_priority |= (node_a->mrt->priority[n] >
523 node_b->mrt->priority[n]) ?
524 ACL_PRIORITY_NODE_A : ACL_PRIORITY_NODE_B;
525 }
526 }
527
528 /*
529 * if node a is higher or equal priority for all categories,
530 * then return node_a.
531 */
532 if (combined_priority == ACL_PRIORITY_NODE_A ||
533 combined_priority == ACL_PRIORITY_EQUAL) {
534 *node_c = node_a;
535 return 0;
536 }
537
538 /*
539 * if node b is higher or equal priority for all categories,
540 * then return node_b.
541 */
542 if (combined_priority == ACL_PRIORITY_NODE_B) {
543 *node_c = node_b;
544 return 0;
545 }
546
547 /*
548 * mixed priorities - create a new node with the highest priority
549 * for each category.
550 */
551
552 /* force new duplication. */
553 node_a->next = NULL;
554
555 *node_c = acl_dup_node(context, node_a);
556 for (n = 0; n < context->cfg.num_categories; n++) {
557 if ((*node_c)->mrt->priority[n] < node_b->mrt->priority[n]) {
558 (*node_c)->mrt->priority[n] = node_b->mrt->priority[n];
559 (*node_c)->mrt->results[n] = node_b->mrt->results[n];
560 }
561 }
562 return 0;
563 }
564
565 /*
566 * Merge nodes A and B together,
567 * returns a node that is the path for the intersection
568 *
569 * If match node (leaf on trie)
570 * For each category
571 * return node = highest priority result
572 *
573 * Create C as a duplicate of A to point to child intersections
574 * If any pointers in C intersect with any in B
575 * For each intersection
576 * merge children
577 * remove intersection from C pointer
578 * add a pointer from C to child intersection node
579 * Compact the pointers in A and B
580 * Copy any B pointers that are outside of the intersection to C
581 * If C has no references to the B trie
582 * free C and return A
583 * Else If C has no references to the A trie
584 * free C and return B
585 * Else
586 * return C
587 */
588 static int
589 acl_merge_trie(struct acl_build_context *context,
590 struct rte_acl_node *node_a, struct rte_acl_node *node_b,
591 uint32_t level, struct rte_acl_node **return_c)
592 {
593 uint32_t n, m, ptrs_c, ptrs_b;
594 uint32_t min_add_c, min_add_b;
595 int node_intersect_type;
596 struct rte_acl_bitset node_intersect;
597 struct rte_acl_node *node_c;
598 struct rte_acl_node *node_a_next;
599 int node_b_refs;
600 int node_a_refs;
601
602 node_c = node_a;
603 node_a_next = node_a->next;
604 min_add_c = 0;
605 min_add_b = 0;
606 node_a_refs = node_a->num_ptrs;
607 node_b_refs = 0;
608 node_intersect_type = 0;
609
610 /* Resolve leaf nodes (matches) */
611 if (node_a->match_flag != 0) {
612 acl_resolve_leaf(context, node_a, node_b, return_c);
613 return 0;
614 }
615
616 /*
617 * Create node C as a copy of node A, and do: C = merge(A,B);
618 * If node A can be used instead (A==C), then later we'll
619 * destroy C and return A.
620 */
621 if (level > 0)
622 node_c = acl_dup_node(context, node_a);
623
624 /*
625 * If the two node transitions intersect then merge the transitions.
626 * Check intersection for entire node (all pointers)
627 */
628 node_intersect_type = acl_intersect_type(&node_c->values,
629 &node_b->values,
630 &node_intersect);
631
632 if (node_intersect_type & ACL_INTERSECT) {
633
634 min_add_b = node_b->min_add;
635 node_b->min_add = node_b->num_ptrs;
636 ptrs_b = node_b->num_ptrs;
637
638 min_add_c = node_c->min_add;
639 node_c->min_add = node_c->num_ptrs;
640 ptrs_c = node_c->num_ptrs;
641
642 for (n = 0; n < ptrs_c; n++) {
643 if (node_c->ptrs[n].ptr == NULL) {
644 node_a_refs--;
645 continue;
646 }
647 node_c->ptrs[n].ptr->next = NULL;
648 for (m = 0; m < ptrs_b; m++) {
649
650 struct rte_acl_bitset child_intersect;
651 int child_intersect_type;
652 struct rte_acl_node *child_node_c = NULL;
653
654 if (node_b->ptrs[m].ptr == NULL ||
655 node_c->ptrs[n].ptr ==
656 node_b->ptrs[m].ptr)
657 continue;
658
659 child_intersect_type = acl_intersect_type(
660 &node_c->ptrs[n].values,
661 &node_b->ptrs[m].values,
662 &child_intersect);
663
664 if ((child_intersect_type & ACL_INTERSECT) !=
665 0) {
666 if (acl_merge_trie(context,
667 node_c->ptrs[n].ptr,
668 node_b->ptrs[m].ptr,
669 level + 1,
670 &child_node_c))
671 return 1;
672
673 if (child_node_c != NULL &&
674 child_node_c !=
675 node_c->ptrs[n].ptr) {
676
677 node_b_refs++;
678
679 /*
680 * Add a link from C to
681 * child_C covering all transitions
682 * in the intersection.
683 */
684 acl_add_ptr(context, node_c,
685 child_node_c,
686 &child_intersect);
687
688 /*
689 * inc refs if pointer is not
690 * to node b.
691 */
692 node_a_refs += (child_node_c !=
693 node_b->ptrs[m].ptr);
694
695 /*
696 * Remove intersection from C
697 * pointer.
698 */
699 if (!acl_exclude(
700 &node_c->ptrs[n].values,
701 &node_c->ptrs[n].values,
702 &child_intersect)) {
703 acl_deref_ptr(context,
704 node_c, n);
705 node_c->ptrs[n].ptr =
706 NULL;
707 node_a_refs--;
708 }
709 }
710 }
711 }
712 }
713
714 /* Compact pointers */
715 node_c->min_add = min_add_c;
716 acl_compact_node_ptrs(node_c);
717 node_b->min_add = min_add_b;
718 acl_compact_node_ptrs(node_b);
719 }
720
721 /*
722 * Copy pointers outside of the intersection from B to C
723 */
724 if ((node_intersect_type & ACL_INTERSECT_B) != 0) {
725 node_b_refs++;
726 for (m = 0; m < node_b->num_ptrs; m++)
727 if (node_b->ptrs[m].ptr != NULL)
728 acl_copy_ptr(context, node_c,
729 node_b, m, &node_intersect);
730 }
731
732 /*
733 * Free node C if the merged sub-trie is fully contained in A or B,
734 * but only when node C is a fresh duplicate of node A
735 * (i.e. not a pre-existing duplicate).
736 */
737 if (node_c != node_a && node_c != node_a_next) {
738
739 /*
740 * if the intersection has no references to the
741 * B side, then it is contained in A
742 */
743 if (node_b_refs == 0) {
744 acl_free_node(context, node_c);
745 node_c = node_a;
746 } else {
747 /*
748 * if the intersection has no references to the
749 * A side, then it is contained in B.
750 */
751 if (node_a_refs == 0) {
752 acl_free_node(context, node_c);
753 node_c = node_b;
754 }
755 }
756 }
757
758 if (return_c != NULL)
759 *return_c = node_c;
760
761 if (level == 0)
762 acl_free_node(context, node_b);
763
764 return 0;
765 }
766
767 /*
768 * Reset current runtime fields before next build:
769 * - free allocated RT memory.
770 * - reset all RT related fields to zero.
771 */
772 static void
773 acl_build_reset(struct rte_acl_ctx *ctx)
774 {
775 rte_free(ctx->mem);
776 memset(&ctx->num_categories, 0,
777 sizeof(*ctx) - offsetof(struct rte_acl_ctx, num_categories));
778 }
779
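/*
 * Generate a chain of nodes matching the byte range lo[n]..hi[n] at each
 * level, walking the field from byte index size-1 down to 0; the final
 * byte's transition points to 'end'.
 */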
780 static void
781 acl_gen_range(struct acl_build_context *context,
782 const uint8_t *hi, const uint8_t *lo, int size, int level,
783 struct rte_acl_node *root, struct rte_acl_node *end)
784 {
785 struct rte_acl_node *node, *prev;
786 uint32_t n;
787
788 prev = root;
789 for (n = size - 1; n > 0; n--) {
790 node = acl_alloc_node(context, level++);
791 acl_add_ptr_range(context, prev, node, lo[n], hi[n]);
792 prev = node;
793 }
794 acl_add_ptr_range(context, prev, end, lo[0], hi[0]);
795 }
796
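/*
 * Build a mini-trie matching the inclusive range [min, max].  When the
 * top byte of min and max differs, the range is split into up to three
 * chains: a partial chain for min's top byte, a partial chain for max's
 * top byte, and a full 0x00..0xff chain for the top-byte values between.
 */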
797 static struct rte_acl_node *
798 acl_gen_range_trie(struct acl_build_context *context,
799 const void *min, const void *max,
800 int size, int level, struct rte_acl_node **pend)
801 {
802 int32_t n;
803 struct rte_acl_node *root;
804 const uint8_t *lo = min;
805 const uint8_t *hi = max;
806
807 *pend = acl_alloc_node(context, level+size);
808 root = acl_alloc_node(context, level++);
809
810 if (lo[size - 1] == hi[size - 1]) {
811 acl_gen_range(context, hi, lo, size, level, root, *pend);
812 } else {
813 uint8_t limit_lo[64];
814 uint8_t limit_hi[64];
815 uint8_t hi_ff = UINT8_MAX;
816 uint8_t lo_00 = 0;
817
818 memset(limit_lo, 0, RTE_DIM(limit_lo));
819 memset(limit_hi, UINT8_MAX, RTE_DIM(limit_hi));
820
821 for (n = size - 2; n >= 0; n--) {
822 hi_ff = (uint8_t)(hi_ff & hi[n]);
823 lo_00 = (uint8_t)(lo_00 | lo[n]);
824 }
825
826 if (hi_ff != UINT8_MAX) {
827 limit_lo[size - 1] = hi[size - 1];
828 acl_gen_range(context, hi, limit_lo, size, level,
829 root, *pend);
830 }
831
832 if (lo_00 != 0) {
833 limit_hi[size - 1] = lo[size - 1];
834 acl_gen_range(context, limit_hi, lo, size, level,
835 root, *pend);
836 }
837
838 if (hi[size - 1] - lo[size - 1] > 1 ||
839 lo_00 == 0 ||
840 hi_ff == UINT8_MAX) {
841 limit_lo[size-1] = (uint8_t)(lo[size-1] + (lo_00 != 0));
842 limit_hi[size-1] = (uint8_t)(hi[size-1] -
843 (hi_ff != UINT8_MAX));
844 acl_gen_range(context, limit_hi, limit_lo, size,
845 level, root, *pend);
846 }
847 }
848 return root;
849 }
850
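/*
 * Build a mini-trie for a value/mask pair: one level per byte, where each
 * transition accepts exactly the byte values matching (value & mask) under
 * that byte's mask.  *pend receives the last node of the chain.
 */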
851 static struct rte_acl_node *
852 acl_gen_mask_trie(struct acl_build_context *context,
853 const void *value, const void *mask,
854 int size, int level, struct rte_acl_node **pend)
855 {
856 int32_t n;
857 struct rte_acl_node *root;
858 struct rte_acl_node *node, *prev;
859 struct rte_acl_bitset bits;
860 const uint8_t *val = value;
861 const uint8_t *msk = mask;
862
863 root = acl_alloc_node(context, level++);
864 prev = root;
865
866 for (n = size - 1; n >= 0; n--) {
867 node = acl_alloc_node(context, level++);
868 acl_gen_mask(&bits, val[n] & msk[n], msk[n]);
869 acl_add_ptr(context, prev, node, &bits);
870 prev = node;
871 }
872
873 *pend = prev;
874 return root;
875 }
876
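/*
 * Build a single trie for the rule list starting at 'head'.  Each rule is
 * expanded field by field into mini-tries that are merged into a per-rule
 * chain, which is then merged into the trie.  If adding one rule grows the
 * trie by more than context->cur_node_max nodes, the build stops and *last
 * is set to the previous rule so the caller can move the remaining rules
 * into another trie; *last is NULL when all rules fit.
 */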
877 static struct rte_acl_node *
878 build_trie(struct acl_build_context *context, struct rte_acl_build_rule *head,
879 struct rte_acl_build_rule **last, uint32_t *count)
880 {
881 uint32_t n, m;
882 int field_index, node_count;
883 struct rte_acl_node *trie;
884 struct rte_acl_build_rule *prev, *rule;
885 struct rte_acl_node *end, *merge, *root, *end_prev;
886 const struct rte_acl_field *fld;
887
888 prev = head;
889 rule = head;
890 *last = prev;
891
892 trie = acl_alloc_node(context, 0);
893
894 while (rule != NULL) {
895
896 root = acl_alloc_node(context, 0);
897
898 root->ref_count = 1;
899 end = root;
900
901 for (n = 0; n < rule->config->num_fields; n++) {
902
903 field_index = rule->config->defs[n].field_index;
904 fld = rule->f->field + field_index;
905 end_prev = end;
906
907 /* build a mini-trie for this field */
908 switch (rule->config->defs[n].type) {
909
910 case RTE_ACL_FIELD_TYPE_BITMASK:
911 merge = acl_gen_mask_trie(context,
912 &fld->value,
913 &fld->mask_range,
914 rule->config->defs[n].size,
915 end->level + 1,
916 &end);
917 break;
918
919 case RTE_ACL_FIELD_TYPE_MASK:
920 {
921 /*
922 * set msb for the size of the field and
923 * all higher bits.
924 */
925 uint64_t mask;
926 mask = RTE_ACL_MASKLEN_TO_BITMASK(
927 fld->mask_range.u32,
928 rule->config->defs[n].size);
929
930 /* gen a mini-trie for this field */
931 merge = acl_gen_mask_trie(context,
932 &fld->value,
933 (char *)&mask,
934 rule->config->defs[n].size,
935 end->level + 1,
936 &end);
937 }
938 break;
939
940 case RTE_ACL_FIELD_TYPE_RANGE:
941 merge = acl_gen_range_trie(context,
942 &rule->f->field[field_index].value,
943 &rule->f->field[field_index].mask_range,
944 rule->config->defs[n].size,
945 end->level + 1,
946 &end);
947 break;
948
949 default:
950 RTE_LOG(ERR, ACL,
951 "Error in rule[%u] type - %hhu\n",
952 rule->f->data.userdata,
953 rule->config->defs[n].type);
954 return NULL;
955 }
956
957 /* merge this field on to the end of the rule */
958 if (acl_merge_trie(context, end_prev, merge, 0,
959 NULL) != 0) {
960 return NULL;
961 }
962 }
963
964 end->match_flag = ++context->num_build_rules;
965
966 /*
967 * Setup the results for this rule.
968 * The result and priority of each category.
969 */
970 if (end->mrt == NULL)
971 end->mrt = acl_build_alloc(context, 1,
972 sizeof(*end->mrt));
973
974 for (m = context->cfg.num_categories; 0 != m--; ) {
975 if (rule->f->data.category_mask & (1 << m)) {
976 end->mrt->results[m] = rule->f->data.userdata;
977 end->mrt->priority[m] = rule->f->data.priority;
978 } else {
979 end->mrt->results[m] = 0;
980 end->mrt->priority[m] = 0;
981 }
982 }
983
984 node_count = context->num_nodes;
985 (*count)++;
986
987 /* merge this rule into the trie */
988 if (acl_merge_trie(context, trie, root, 0, NULL))
989 return NULL;
990
991 node_count = context->num_nodes - node_count;
992 if (node_count > context->cur_node_max) {
993 *last = prev;
994 return trie;
995 }
996
997 prev = rule;
998 rule = rule->next;
999 }
1000
1001 *last = NULL;
1002 return trie;
1003 }
1004
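/*
 * Compute per-field "wildness" for every rule: roughly the percentage
 * (0..100) of the field's value space that the rule matches, where 100
 * means the field is completely wild.
 */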
1005 static void
1006 acl_calc_wildness(struct rte_acl_build_rule *head,
1007 const struct rte_acl_config *config)
1008 {
1009 uint32_t n;
1010 struct rte_acl_build_rule *rule;
1011
1012 for (rule = head; rule != NULL; rule = rule->next) {
1013
1014 for (n = 0; n < config->num_fields; n++) {
1015
1016 double wild = 0;
1017 uint32_t bit_len = CHAR_BIT * config->defs[n].size;
1018 uint64_t msk_val = RTE_LEN2MASK(bit_len,
1019 typeof(msk_val));
1020 double size = bit_len;
1021 int field_index = config->defs[n].field_index;
1022 const struct rte_acl_field *fld = rule->f->field +
1023 field_index;
1024
1025 switch (rule->config->defs[n].type) {
1026 case RTE_ACL_FIELD_TYPE_BITMASK:
1027 wild = (size - __builtin_popcountll(
1028 fld->mask_range.u64 & msk_val)) /
1029 size;
1030 break;
1031
1032 case RTE_ACL_FIELD_TYPE_MASK:
1033 wild = (size - fld->mask_range.u32) / size;
1034 break;
1035
1036 case RTE_ACL_FIELD_TYPE_RANGE:
1037 wild = (fld->mask_range.u64 & msk_val) -
1038 (fld->value.u64 & msk_val);
1039 wild = wild / msk_val;
1040 break;
1041 }
1042
1043 rule->wildness[field_index] = (uint32_t)(wild * 100);
1044 }
1045 }
1046 }
1047
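/*
 * Tally how wild each field is across all rules and remove from the config
 * any field that is 100% wild in every rule, provided all fields sharing
 * the same input index can be removed together.
 */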
1048 static void
1049 acl_rule_stats(struct rte_acl_build_rule *head, struct rte_acl_config *config)
1050 {
1051 struct rte_acl_build_rule *rule;
1052 uint32_t n, m, fields_deactivated = 0;
1053 uint32_t start = 0, deactivate = 0;
1054 int tally[RTE_ACL_MAX_LEVELS][TALLY_NUM];
1055
1056 memset(tally, 0, sizeof(tally));
1057
1058 for (rule = head; rule != NULL; rule = rule->next) {
1059
1060 for (n = 0; n < config->num_fields; n++) {
1061 uint32_t field_index = config->defs[n].field_index;
1062
1063 tally[n][TALLY_0]++;
1064 for (m = 1; m < RTE_DIM(wild_limits); m++) {
1065 if (rule->wildness[field_index] >=
1066 wild_limits[m])
1067 tally[n][m]++;
1068 }
1069 }
1070
1071 for (n = config->num_fields - 1; n > 0; n--) {
1072 uint32_t field_index = config->defs[n].field_index;
1073
1074 if (rule->wildness[field_index] == 100)
1075 tally[n][TALLY_DEPTH]++;
1076 else
1077 break;
1078 }
1079 }
1080
1081 /*
1082 * Look for any field that is always wild and drop it from the config.
1083 * Only deactivate when every field sharing the same input index is deactivated.
1084 */
1085 for (n = 1; n < config->num_fields; n++) {
1086 if (config->defs[n].input_index !=
1087 config->defs[n - 1].input_index) {
1088 for (m = start; m < n; m++)
1089 tally[m][TALLY_DEACTIVATED] = deactivate;
1090 fields_deactivated += deactivate;
1091 start = n;
1092 deactivate = 1;
1093 }
1094
1095 /* if the field is not always completely wild */
1096 if (tally[n][TALLY_100] != tally[n][TALLY_0])
1097 deactivate = 0;
1098 }
1099
1100 for (m = start; m < n; m++)
1101 tally[m][TALLY_DEACTIVATED] = deactivate;
1102
1103 fields_deactivated += deactivate;
1104
1105 /* remove deactivated fields */
1106 if (fields_deactivated) {
1107 uint32_t k, l = 0;
1108
1109 for (k = 0; k < config->num_fields; k++) {
1110 if (tally[k][TALLY_DEACTIVATED] == 0) {
1111 memmove(&tally[l][0], &tally[k][0],
1112 TALLY_NUM * sizeof(tally[0][0]));
1113 memmove(&config->defs[l++],
1114 &config->defs[k],
1115 sizeof(struct rte_acl_field_def));
1116 }
1117 }
1118 config->num_fields = l;
1119 }
1120 }
1121
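/*
 * Compare two rules by per-field wildness (field 0 is skipped); returns a
 * positive value when r1 is wilder than r2 at the first differing field.
 */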
1122 static int
1123 rule_cmp_wildness(struct rte_acl_build_rule *r1, struct rte_acl_build_rule *r2)
1124 {
1125 uint32_t n;
1126
1127 for (n = 1; n < r1->config->num_fields; n++) {
1128 int field_index = r1->config->defs[n].field_index;
1129
1130 if (r1->wildness[field_index] != r2->wildness[field_index])
1131 return r1->wildness[field_index] -
1132 r2->wildness[field_index];
1133 }
1134 return 0;
1135 }
1136
1137 /*
1138 * Split the rte_acl_build_rule list into two lists.
1139 */
1140 static void
1141 rule_list_split(struct rte_acl_build_rule *source,
1142 struct rte_acl_build_rule **list_a,
1143 struct rte_acl_build_rule **list_b)
1144 {
1145 struct rte_acl_build_rule *fast;
1146 struct rte_acl_build_rule *slow;
1147
1148 if (source == NULL || source->next == NULL) {
1149 /* length < 2 cases */
1150 *list_a = source;
1151 *list_b = NULL;
1152 } else {
1153 slow = source;
1154 fast = source->next;
1155 /* Advance 'fast' two nodes, and advance 'slow' one node */
1156 while (fast != NULL) {
1157 fast = fast->next;
1158 if (fast != NULL) {
1159 slow = slow->next;
1160 fast = fast->next;
1161 }
1162 }
1163 /* 'slow' is before the midpoint in the list, so split it in two
1164 at that point. */
1165 *list_a = source;
1166 *list_b = slow->next;
1167 slow->next = NULL;
1168 }
1169 }
1170
1171 /*
1172 * Merge two sorted lists.
1173 */
1174 static struct rte_acl_build_rule *
1175 rule_list_sorted_merge(struct rte_acl_build_rule *a,
1176 struct rte_acl_build_rule *b)
1177 {
1178 struct rte_acl_build_rule *result = NULL;
1179 struct rte_acl_build_rule **last_next = &result;
1180
1181 while (1) {
1182 if (a == NULL) {
1183 *last_next = b;
1184 break;
1185 } else if (b == NULL) {
1186 *last_next = a;
1187 break;
1188 }
1189 if (rule_cmp_wildness(a, b) >= 0) {
1190 *last_next = a;
1191 last_next = &a->next;
1192 a = a->next;
1193 } else {
1194 *last_next = b;
1195 last_next = &b->next;
1196 b = b->next;
1197 }
1198 }
1199 return result;
1200 }
1201
1202 /*
1203 * Sort list of rules based on the rules wildness.
1204 * Use recursive mergesort algorithm.
1205 */
1206 static struct rte_acl_build_rule *
1207 sort_rules(struct rte_acl_build_rule *head)
1208 {
1209 struct rte_acl_build_rule *a;
1210 struct rte_acl_build_rule *b;
1211
1212 /* Base case -- length 0 or 1 */
1213 if (head == NULL || head->next == NULL)
1214 return head;
1215
1216 /* Split head into 'a' and 'b' sublists */
1217 rule_list_split(head, &a, &b);
1218
1219 /* Recursively sort the sublists */
1220 a = sort_rules(a);
1221 b = sort_rules(b);
1222
1223 /* answer = merge the two sorted lists together */
1224 return rule_list_sorted_merge(a, b);
1225 }
1226
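/*
 * Record the input offset of each distinct input_index into data_index[],
 * in field order, and return the number of offsets written.
 */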
1227 static uint32_t
1228 acl_build_index(const struct rte_acl_config *config, uint32_t *data_index)
1229 {
1230 uint32_t n, m;
1231 int32_t last_header;
1232
1233 m = 0;
1234 last_header = -1;
1235
1236 for (n = 0; n < config->num_fields; n++) {
1237 if (last_header != config->defs[n].input_index) {
1238 last_header = config->defs[n].input_index;
1239 data_index[m++] = config->defs[n].offset;
1240 }
1241 }
1242
1243 return m;
1244 }
1245
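/*
 * Sort the n-th rule set by wildness, fill in the trie metadata and build
 * its trie with the given node budget.  Returns the last rule that made it
 * into the trie when the set had to be cut short, or NULL if every rule fit.
 */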
1246 static struct rte_acl_build_rule *
1247 build_one_trie(struct acl_build_context *context,
1248 struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES],
1249 uint32_t n, int32_t node_max)
1250 {
1251 struct rte_acl_build_rule *last;
1252 struct rte_acl_config *config;
1253
1254 config = rule_sets[n]->config;
1255
1256 acl_rule_stats(rule_sets[n], config);
1257 rule_sets[n] = sort_rules(rule_sets[n]);
1258
1259 context->tries[n].type = RTE_ACL_FULL_TRIE;
1260 context->tries[n].count = 0;
1261
1262 context->tries[n].num_data_indexes = acl_build_index(config,
1263 context->data_indexes[n]);
1264 context->tries[n].data_index = context->data_indexes[n];
1265
1266 context->cur_node_max = node_max;
1267
1268 context->bld_tries[n].trie = build_trie(context, rule_sets[n],
1269 &last, &context->tries[n].count);
1270
1271 return last;
1272 }
1273
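/*
 * Build the tries for the whole rule set.  Rules go into the current trie
 * until it grows past node_max; the remainder is split off into a new trie
 * (with its own copy of the config, since acl_rule_stats() may deactivate
 * different fields) and the truncated trie is rebuilt without a node limit,
 * up to RTE_ACL_MAX_TRIES tries in total.
 */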
1274 static int
1275 acl_build_tries(struct acl_build_context *context,
1276 struct rte_acl_build_rule *head)
1277 {
1278 uint32_t n, num_tries;
1279 struct rte_acl_config *config;
1280 struct rte_acl_build_rule *last;
1281 struct rte_acl_build_rule *rule_sets[RTE_ACL_MAX_TRIES];
1282
1283 config = head->config;
1284 rule_sets[0] = head;
1285
1286 /* initialize tries */
1287 for (n = 0; n < RTE_DIM(context->tries); n++) {
1288 context->tries[n].type = RTE_ACL_UNUSED_TRIE;
1289 context->bld_tries[n].trie = NULL;
1290 context->tries[n].count = 0;
1291 }
1292
1293 context->tries[0].type = RTE_ACL_FULL_TRIE;
1294
1295 /* calc wildness of each field of each rule */
1296 acl_calc_wildness(head, config);
1297
1298 for (n = 0;; n = num_tries) {
1299
1300 num_tries = n + 1;
1301
1302 last = build_one_trie(context, rule_sets, n, context->node_max);
1303 if (context->bld_tries[n].trie == NULL) {
1304 RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
1305 return -ENOMEM;
1306 }
1307
1308 /* Build of the last trie completed. */
1309 if (last == NULL)
1310 break;
1311
1312 if (num_tries == RTE_DIM(context->tries)) {
1313 RTE_LOG(ERR, ACL,
1314 "Exceeded max number of tries: %u\n",
1315 num_tries);
1316 return -ENOMEM;
1317 }
1318
1319 /* Trie is getting too big, split remaining rule set. */
1320 rule_sets[num_tries] = last->next;
1321 last->next = NULL;
1322 acl_free_node(context, context->bld_tries[n].trie);
1323
1324 /* Create a new copy of config for remaining rules. */
1325 config = acl_build_alloc(context, 1, sizeof(*config));
1326 memcpy(config, rule_sets[n]->config, sizeof(*config));
1327
1328 /* Make remaining rules use new config. */
1329 for (head = rule_sets[num_tries]; head != NULL;
1330 head = head->next)
1331 head->config = config;
1332
1333 /*
1334 * Rebuild the trie for the reduced rule-set.
1335 * Don't try to split it any further.
1336 */
1337 last = build_one_trie(context, rule_sets, n, INT32_MAX);
1338 if (context->bld_tries[n].trie == NULL || last != NULL) {
1339 RTE_LOG(ERR, ACL, "Build of %u-th trie failed\n", n);
1340 return -ENOMEM;
1341 }
1342
1343 }
1344
1345 context->num_tries = num_tries;
1346 return 0;
1347 }
1348
1349 static void
1350 acl_build_log(const struct acl_build_context *ctx)
1351 {
1352 uint32_t n;
1353
1354 RTE_LOG(DEBUG, ACL, "Build phase for ACL \"%s\":\n"
1355 "node limit for tree split: %u\n"
1356 "nodes created: %u\n"
1357 "memory consumed: %zu\n",
1358 ctx->acx->name,
1359 ctx->node_max,
1360 ctx->num_nodes,
1361 ctx->pool.alloc);
1362
1363 for (n = 0; n < RTE_DIM(ctx->tries); n++) {
1364 if (ctx->tries[n].count != 0)
1365 RTE_LOG(DEBUG, ACL,
1366 "trie %u: number of rules: %u, indexes: %u\n",
1367 n, ctx->tries[n].count,
1368 ctx->tries[n].num_data_indexes);
1369 }
1370 }
1371
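/*
 * Create the build-time rule list: one rte_acl_build_rule (plus a per-field
 * wildness array) for every input rule whose category mask overlaps the
 * categories being built.  The list is linked in reverse order of the
 * original rule array.
 */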
1372 static int
1373 acl_build_rules(struct acl_build_context *bcx)
1374 {
1375 struct rte_acl_build_rule *br, *head;
1376 const struct rte_acl_rule *rule;
1377 uint32_t *wp;
1378 uint32_t fn, i, n, num;
1379 size_t ofs, sz;
1380
1381 fn = bcx->cfg.num_fields;
1382 n = bcx->acx->num_rules;
1383 ofs = n * sizeof(*br);
1384 sz = ofs + n * fn * sizeof(*wp);
1385
1386 br = tb_alloc(&bcx->pool, sz);
1387
1388 wp = (uint32_t *)((uintptr_t)br + ofs);
1389 num = 0;
1390 head = NULL;
1391
1392 for (i = 0; i != n; i++) {
1393 rule = (const struct rte_acl_rule *)
1394 ((uintptr_t)bcx->acx->rules + bcx->acx->rule_sz * i);
1395 if ((rule->data.category_mask & bcx->category_mask) != 0) {
1396 br[num].next = head;
1397 br[num].config = &bcx->cfg;
1398 br[num].f = rule;
1399 br[num].wildness = wp;
1400 wp += fn;
1401 head = br + num;
1402 num++;
1403 }
1404 }
1405
1406 bcx->num_rules = num;
1407 bcx->build_rules = head;
1408
1409 return 0;
1410 }
1411
1412 /*
1413 * Copy data_indexes for each trie into RT location.
1414 */
1415 static void
1416 acl_set_data_indexes(struct rte_acl_ctx *ctx)
1417 {
1418 uint32_t i, n, ofs;
1419
1420 ofs = 0;
1421 for (i = 0; i != ctx->num_tries; i++) {
1422 n = ctx->trie[i].num_data_indexes;
1423 memcpy(ctx->data_indexes + ofs, ctx->trie[i].data_index,
1424 n * sizeof(ctx->data_indexes[0]));
1425 ctx->trie[i].data_index = ctx->data_indexes + ofs;
1426 ofs += RTE_ACL_MAX_FIELDS;
1427 }
1428 }
1429
1430 /*
1431 * Internal routine, performs 'build' phase of trie generation:
1432 * - sets up the build context.
1433 * - analyzes the given set of rules.
1434 * - builds internal tree(s).
1435 */
1436 static int
1437 acl_bld(struct acl_build_context *bcx, struct rte_acl_ctx *ctx,
1438 const struct rte_acl_config *cfg, uint32_t node_max)
1439 {
1440 int32_t rc;
1441
1442 /* setup build context. */
1443 memset(bcx, 0, sizeof(*bcx));
1444 bcx->acx = ctx;
1445 bcx->pool.alignment = ACL_POOL_ALIGN;
1446 bcx->pool.min_alloc = ACL_POOL_ALLOC_MIN;
1447 bcx->cfg = *cfg;
1448 bcx->category_mask = RTE_LEN2MASK(bcx->cfg.num_categories,
1449 typeof(bcx->category_mask));
1450 bcx->node_max = node_max;
1451
1452 rc = sigsetjmp(bcx->pool.fail, 0);
1453
1454 /* non-zero rc means the build phase ran out of memory. */
1455 if (rc != 0) {
1456 RTE_LOG(ERR, ACL,
1457 "ACL context: %s, %s() failed with error code: %d\n",
1458 bcx->acx->name, __func__, rc);
1459 return rc;
1460 }
1461
1462 /* Create a build rules copy. */
1463 rc = acl_build_rules(bcx);
1464 if (rc != 0)
1465 return rc;
1466
1467 /* No rules to build for that context+config */
1468 if (bcx->build_rules == NULL) {
1469 rc = -EINVAL;
1470 } else {
1471 /* build internal trie representation. */
1472 rc = acl_build_tries(bcx, bcx->build_rules);
1473 }
1474 return rc;
1475 }
1476
1477 /*
1478 * Check that parameters for acl_build() are valid.
1479 */
1480 static int
1481 acl_check_bld_param(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
1482 {
1483 static const size_t field_sizes[] = {
1484 sizeof(uint8_t), sizeof(uint16_t),
1485 sizeof(uint32_t), sizeof(uint64_t),
1486 };
1487
1488 uint32_t i, j;
1489
1490 if (ctx == NULL || cfg == NULL || cfg->num_categories == 0 ||
1491 cfg->num_categories > RTE_ACL_MAX_CATEGORIES ||
1492 cfg->num_fields == 0 ||
1493 cfg->num_fields > RTE_ACL_MAX_FIELDS)
1494 return -EINVAL;
1495
1496 for (i = 0; i != cfg->num_fields; i++) {
1497 if (cfg->defs[i].type > RTE_ACL_FIELD_TYPE_BITMASK) {
1498 RTE_LOG(ERR, ACL,
1499 "ACL context: %s, invalid type: %hhu for %u-th field\n",
1500 ctx->name, cfg->defs[i].type, i);
1501 return -EINVAL;
1502 }
1503 for (j = 0;
1504 j != RTE_DIM(field_sizes) &&
1505 cfg->defs[i].size != field_sizes[j];
1506 j++)
1507 ;
1508
1509 if (j == RTE_DIM(field_sizes)) {
1510 RTE_LOG(ERR, ACL,
1511 "ACL context: %s, invalid size: %hhu for %u-th field\n",
1512 ctx->name, cfg->defs[i].size, i);
1513 return -EINVAL;
1514 }
1515 }
1516
1517 return 0;
1518 }
1519
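/*
 * Public build entry point: validate parameters, free any previous run-time
 * data, then run the build/generation phase, halving the node limit used
 * for trie splitting (NODE_MAX down to NODE_MIN) and retrying while
 * generation fails with -ERANGE (run-time image larger than cfg->max_size).
 * A max_size of 0 means no size limit and a single attempt with NODE_MIN.
 */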
1520 int
1521 rte_acl_build(struct rte_acl_ctx *ctx, const struct rte_acl_config *cfg)
1522 {
1523 int32_t rc;
1524 uint32_t n;
1525 size_t max_size;
1526 struct acl_build_context bcx;
1527
1528 rc = acl_check_bld_param(ctx, cfg);
1529 if (rc != 0)
1530 return rc;
1531
1532 acl_build_reset(ctx);
1533
1534 if (cfg->max_size == 0) {
1535 n = NODE_MIN;
1536 max_size = SIZE_MAX;
1537 } else {
1538 n = NODE_MAX;
1539 max_size = cfg->max_size;
1540 }
1541
1542 for (rc = -ERANGE; n >= NODE_MIN && rc == -ERANGE; n /= 2) {
1543
1544 /* perform build phase. */
1545 rc = acl_bld(&bcx, ctx, cfg, n);
1546
1547 if (rc == 0) {
1548 /* allocate and fill run-time structures. */
1549 rc = rte_acl_gen(ctx, bcx.tries, bcx.bld_tries,
1550 bcx.num_tries, bcx.cfg.num_categories,
1551 RTE_ACL_MAX_FIELDS * RTE_DIM(bcx.tries) *
1552 sizeof(ctx->data_indexes[0]), max_size);
1553 if (rc == 0) {
1554 /* set data indexes. */
1555 acl_set_data_indexes(ctx);
1556
1557 /* copy in build config. */
1558 ctx->config = *cfg;
1559 }
1560 }
1561
1562 acl_build_log(&bcx);
1563
1564 /* cleanup after build. */
1565 tb_free_pool(&bcx.pool);
1566 }
1567
1568 return rc;
1569 }