4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 * Resolve priority for multiple results (scalar version).
38 * This consists comparing the priority of the current traversal with the
39 * running set of results for the packet.
40 * For each result, keep a running array of the result (rule number) and
41 * its priority for each category.
44 resolve_priority_scalar(uint64_t transition
, int n
,
45 const struct rte_acl_ctx
*ctx
, struct parms
*parms
,
46 const struct rte_acl_match_results
*p
, uint32_t categories
)
49 int32_t *saved_priority
;
50 uint32_t *saved_results
;
51 const int32_t *priority
;
52 const uint32_t *results
;
54 saved_results
= parms
[n
].cmplt
->results
;
55 saved_priority
= parms
[n
].cmplt
->priority
;
57 /* results and priorities for completed trie */
58 results
= p
[transition
].results
;
59 priority
= p
[transition
].priority
;
61 /* if this is not the first completed trie */
62 if (parms
[n
].cmplt
->count
!= ctx
->num_tries
) {
63 for (i
= 0; i
< categories
; i
+= RTE_ACL_RESULTS_MULTIPLIER
) {
65 if (saved_priority
[i
] <= priority
[i
]) {
66 saved_priority
[i
] = priority
[i
];
67 saved_results
[i
] = results
[i
];
69 if (saved_priority
[i
+ 1] <= priority
[i
+ 1]) {
70 saved_priority
[i
+ 1] = priority
[i
+ 1];
71 saved_results
[i
+ 1] = results
[i
+ 1];
73 if (saved_priority
[i
+ 2] <= priority
[i
+ 2]) {
74 saved_priority
[i
+ 2] = priority
[i
+ 2];
75 saved_results
[i
+ 2] = results
[i
+ 2];
77 if (saved_priority
[i
+ 3] <= priority
[i
+ 3]) {
78 saved_priority
[i
+ 3] = priority
[i
+ 3];
79 saved_results
[i
+ 3] = results
[i
+ 3];
83 for (i
= 0; i
< categories
; i
+= RTE_ACL_RESULTS_MULTIPLIER
) {
84 saved_priority
[i
] = priority
[i
];
85 saved_priority
[i
+ 1] = priority
[i
+ 1];
86 saved_priority
[i
+ 2] = priority
[i
+ 2];
87 saved_priority
[i
+ 3] = priority
[i
+ 3];
89 saved_results
[i
] = results
[i
];
90 saved_results
[i
+ 1] = results
[i
+ 1];
91 saved_results
[i
+ 2] = results
[i
+ 2];
92 saved_results
[i
+ 3] = results
[i
+ 3];
/*
 * Return the bit position of the lowest set bit in "input"
 * (via rte_bsf32), or "max" when "input" is zero.
 */
static inline uint32_t
scan_forward(uint32_t input, uint32_t max)
{
	if (input == 0)
		return max;

	return rte_bsf32(input);
}
103 static inline uint64_t
104 scalar_transition(const uint64_t *trans_table
, uint64_t transition
,
107 uint32_t addr
, index
, ranges
, x
, a
, b
, c
;
109 /* break transition into component parts */
110 ranges
= transition
>> (sizeof(index
) * CHAR_BIT
);
111 index
= transition
& ~RTE_ACL_NODE_INDEX
;
112 addr
= transition
^ index
;
114 if (index
!= RTE_ACL_NODE_DFA
) {
115 /* calc address for a QRANGE/SINGLE node */
116 c
= (uint32_t)input
* SCALAR_QRANGE_MULT
;
117 a
= ranges
| SCALAR_QRANGE_MIN
;
118 a
-= (c
& SCALAR_QRANGE_MASK
);
119 b
= c
& SCALAR_QRANGE_MIN
;
120 a
&= SCALAR_QRANGE_MIN
;
121 a
^= (ranges
^ b
) & (a
^ b
);
122 x
= scan_forward(a
, 32) >> 3;
124 /* calc address for a DFA node */
125 x
= ranges
>> (input
/
126 RTE_ACL_DFA_GR64_SIZE
* RTE_ACL_DFA_GR64_BIT
);
133 /* pickup next transition */
134 transition
= *(trans_table
+ addr
);
139 rte_acl_classify_scalar(const struct rte_acl_ctx
*ctx
, const uint8_t **data
,
140 uint32_t *results
, uint32_t num
, uint32_t categories
)
143 uint64_t transition0
, transition1
;
144 uint32_t input0
, input1
;
145 struct acl_flow_data flows
;
146 uint64_t index_array
[MAX_SEARCHES_SCALAR
];
147 struct completion cmplt
[MAX_SEARCHES_SCALAR
];
148 struct parms parms
[MAX_SEARCHES_SCALAR
];
150 acl_set_flow(&flows
, cmplt
, RTE_DIM(cmplt
), data
, results
, num
,
151 categories
, ctx
->trans_table
);
153 for (n
= 0; n
< MAX_SEARCHES_SCALAR
; n
++) {
155 index_array
[n
] = acl_start_next_trie(&flows
, parms
, n
, ctx
);
158 transition0
= index_array
[0];
159 transition1
= index_array
[1];
161 while ((transition0
| transition1
) & RTE_ACL_NODE_MATCH
) {
162 transition0
= acl_match_check(transition0
,
163 0, ctx
, parms
, &flows
, resolve_priority_scalar
);
164 transition1
= acl_match_check(transition1
,
165 1, ctx
, parms
, &flows
, resolve_priority_scalar
);
168 while (flows
.started
> 0) {
170 input0
= GET_NEXT_4BYTES(parms
, 0);
171 input1
= GET_NEXT_4BYTES(parms
, 1);
173 for (n
= 0; n
< 4; n
++) {
175 transition0
= scalar_transition(flows
.trans
,
176 transition0
, (uint8_t)input0
);
179 transition1
= scalar_transition(flows
.trans
,
180 transition1
, (uint8_t)input1
);
184 while ((transition0
| transition1
) & RTE_ACL_NODE_MATCH
) {
185 transition0
= acl_match_check(transition0
,
186 0, ctx
, parms
, &flows
, resolve_priority_scalar
);
187 transition1
= acl_match_check(transition1
,
188 1, ctx
, parms
, &flows
, resolve_priority_scalar
);