/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_lcore.h>

#include "test.h"
18 | ||
/*
 * Atomic Variables
 * ================
 *
 * - The main test function performs three subtests. The first test
 *   checks that the usual inc/dec/add/sub functions are working
 *   correctly:
 *
 *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to specific
 *     values.
 *
 *   - These variables are incremented and decremented on each core at
 *     the same time in ``test_atomic_usual()``.
 *
 *   - The function checks that once all lcores finish their function,
 *     the value of the atomic variables are still the same.
 *
 * - The second test verifies the behavior of "test and set" functions.
 *
 *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
 *
 *   - Invoke ``test_atomic_tas()`` on each lcore: before doing anything
 *     else, the cores wait for a synchronization flag using ``while
 *     (rte_atomic32_read(&val) == 0)`` which is triggered by the main test
 *     function. Then all cores do a
 *     ``rte_atomicXX_test_and_set()`` at the same time. If it is successful,
 *     it increments another atomic counter.
 *
 *   - The main function checks that the atomic counter was incremented
 *     three times only (once for the 16-bit, once for the 32-bit and once
 *     for the 64-bit variable).
 *
 * - Test "add/sub and return"
 *
 *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
 *
 *   - Invoke ``test_atomic_addsub_return()`` on each lcore. Before doing
 *     anything else, the cores wait for a synchronization flag. Each lcore
 *     does this operation several times::
 *
 *       tmp = rte_atomicXX_add_return(&a, 1);
 *       atomic_add(&count, tmp);
 *       tmp = rte_atomicXX_sub_return(&a, 1);
 *       atomic_sub(&count, tmp+1);
 *
 *   - At the end of the test, the *count* value must be 0.
 */
65 | ||
/* Number of atomic widths exercised (16, 32 and 64 bit). */
#define NUM_ATOMIC_TYPES 3

/* Iterations each worker lcore performs per loop. */
#define N 10000

/* Shared atomic variables that all worker lcores operate on. */
static rte_atomic16_t a16;
static rte_atomic32_t a32;
static rte_atomic64_t a64;
/* Result accumulator checked by the main lcore after each subtest. */
static rte_atomic64_t count;
/* Start flag: workers spin until the main lcore sets it to 1. */
static rte_atomic32_t synchro;
75 | ||
76 | static int | |
77 | test_atomic_usual(__attribute__((unused)) void *arg) | |
78 | { | |
79 | unsigned i; | |
80 | ||
81 | while (rte_atomic32_read(&synchro) == 0) | |
82 | ; | |
83 | ||
84 | for (i = 0; i < N; i++) | |
85 | rte_atomic16_inc(&a16); | |
86 | for (i = 0; i < N; i++) | |
87 | rte_atomic16_dec(&a16); | |
88 | for (i = 0; i < (N / 5); i++) | |
89 | rte_atomic16_add(&a16, 5); | |
90 | for (i = 0; i < (N / 5); i++) | |
91 | rte_atomic16_sub(&a16, 5); | |
92 | ||
93 | for (i = 0; i < N; i++) | |
94 | rte_atomic32_inc(&a32); | |
95 | for (i = 0; i < N; i++) | |
96 | rte_atomic32_dec(&a32); | |
97 | for (i = 0; i < (N / 5); i++) | |
98 | rte_atomic32_add(&a32, 5); | |
99 | for (i = 0; i < (N / 5); i++) | |
100 | rte_atomic32_sub(&a32, 5); | |
101 | ||
102 | for (i = 0; i < N; i++) | |
103 | rte_atomic64_inc(&a64); | |
104 | for (i = 0; i < N; i++) | |
105 | rte_atomic64_dec(&a64); | |
106 | for (i = 0; i < (N / 5); i++) | |
107 | rte_atomic64_add(&a64, 5); | |
108 | for (i = 0; i < (N / 5); i++) | |
109 | rte_atomic64_sub(&a64, 5); | |
110 | ||
111 | return 0; | |
112 | } | |
113 | ||
114 | static int | |
115 | test_atomic_tas(__attribute__((unused)) void *arg) | |
116 | { | |
117 | while (rte_atomic32_read(&synchro) == 0) | |
118 | ; | |
119 | ||
120 | if (rte_atomic16_test_and_set(&a16)) | |
121 | rte_atomic64_inc(&count); | |
122 | if (rte_atomic32_test_and_set(&a32)) | |
123 | rte_atomic64_inc(&count); | |
124 | if (rte_atomic64_test_and_set(&a64)) | |
125 | rte_atomic64_inc(&count); | |
126 | ||
127 | return 0; | |
128 | } | |
129 | ||
130 | static int | |
131 | test_atomic_addsub_and_return(__attribute__((unused)) void *arg) | |
132 | { | |
133 | uint32_t tmp16; | |
134 | uint32_t tmp32; | |
135 | uint64_t tmp64; | |
136 | unsigned i; | |
137 | ||
138 | while (rte_atomic32_read(&synchro) == 0) | |
139 | ; | |
140 | ||
141 | for (i = 0; i < N; i++) { | |
142 | tmp16 = rte_atomic16_add_return(&a16, 1); | |
143 | rte_atomic64_add(&count, tmp16); | |
144 | ||
145 | tmp16 = rte_atomic16_sub_return(&a16, 1); | |
146 | rte_atomic64_sub(&count, tmp16+1); | |
147 | ||
148 | tmp32 = rte_atomic32_add_return(&a32, 1); | |
149 | rte_atomic64_add(&count, tmp32); | |
150 | ||
151 | tmp32 = rte_atomic32_sub_return(&a32, 1); | |
152 | rte_atomic64_sub(&count, tmp32+1); | |
153 | ||
154 | tmp64 = rte_atomic64_add_return(&a64, 1); | |
155 | rte_atomic64_add(&count, tmp64); | |
156 | ||
157 | tmp64 = rte_atomic64_sub_return(&a64, 1); | |
158 | rte_atomic64_sub(&count, tmp64+1); | |
159 | } | |
160 | ||
161 | return 0; | |
162 | } | |
163 | ||
164 | /* | |
165 | * rte_atomic32_inc_and_test() would increase a 32 bits counter by one and then | |
166 | * test if that counter is equal to 0. It would return true if the counter is 0 | |
167 | * and false if the counter is not 0. rte_atomic64_inc_and_test() could do the | |
168 | * same thing but for a 64 bits counter. | |
169 | * Here checks that if the 32/64 bits counter is equal to 0 after being atomically | |
170 | * increased by one. If it is, increase the variable of "count" by one which would | |
171 | * be checked as the result later. | |
172 | * | |
173 | */ | |
174 | static int | |
175 | test_atomic_inc_and_test(__attribute__((unused)) void *arg) | |
176 | { | |
177 | while (rte_atomic32_read(&synchro) == 0) | |
178 | ; | |
179 | ||
180 | if (rte_atomic16_inc_and_test(&a16)) { | |
181 | rte_atomic64_inc(&count); | |
182 | } | |
183 | if (rte_atomic32_inc_and_test(&a32)) { | |
184 | rte_atomic64_inc(&count); | |
185 | } | |
186 | if (rte_atomic64_inc_and_test(&a64)) { | |
187 | rte_atomic64_inc(&count); | |
188 | } | |
189 | ||
190 | return 0; | |
191 | } | |
192 | ||
193 | /* | |
194 | * rte_atomicXX_dec_and_test() should decrease a 32 bits counter by one and then | |
195 | * test if that counter is equal to 0. It should return true if the counter is 0 | |
196 | * and false if the counter is not 0. | |
197 | * This test checks if the counter is equal to 0 after being atomically | |
198 | * decreased by one. If it is, increase the value of "count" by one which is to | |
199 | * be checked as the result later. | |
200 | */ | |
201 | static int | |
202 | test_atomic_dec_and_test(__attribute__((unused)) void *arg) | |
203 | { | |
204 | while (rte_atomic32_read(&synchro) == 0) | |
205 | ; | |
206 | ||
207 | if (rte_atomic16_dec_and_test(&a16)) | |
208 | rte_atomic64_inc(&count); | |
209 | ||
210 | if (rte_atomic32_dec_and_test(&a32)) | |
211 | rte_atomic64_inc(&count); | |
212 | ||
213 | if (rte_atomic64_dec_and_test(&a64)) | |
214 | rte_atomic64_inc(&count); | |
215 | ||
216 | return 0; | |
217 | } | |
218 | ||
219 | static int | |
220 | test_atomic(void) | |
221 | { | |
222 | rte_atomic16_init(&a16); | |
223 | rte_atomic32_init(&a32); | |
224 | rte_atomic64_init(&a64); | |
225 | rte_atomic64_init(&count); | |
226 | rte_atomic32_init(&synchro); | |
227 | ||
228 | rte_atomic16_set(&a16, 1UL << 10); | |
229 | rte_atomic32_set(&a32, 1UL << 10); | |
230 | rte_atomic64_set(&a64, 1ULL << 33); | |
231 | ||
232 | printf("usual inc/dec/add/sub functions\n"); | |
233 | ||
234 | rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MASTER); | |
235 | rte_atomic32_set(&synchro, 1); | |
236 | rte_eal_mp_wait_lcore(); | |
237 | rte_atomic32_set(&synchro, 0); | |
238 | ||
239 | if (rte_atomic16_read(&a16) != 1UL << 10) { | |
240 | printf("Atomic16 usual functions failed\n"); | |
241 | return -1; | |
242 | } | |
243 | ||
244 | if (rte_atomic32_read(&a32) != 1UL << 10) { | |
245 | printf("Atomic32 usual functions failed\n"); | |
246 | return -1; | |
247 | } | |
248 | ||
249 | if (rte_atomic64_read(&a64) != 1ULL << 33) { | |
250 | printf("Atomic64 usual functions failed\n"); | |
251 | return -1; | |
252 | } | |
253 | ||
254 | printf("test and set\n"); | |
255 | ||
256 | rte_atomic64_set(&a64, 0); | |
257 | rte_atomic32_set(&a32, 0); | |
258 | rte_atomic16_set(&a16, 0); | |
259 | rte_atomic64_set(&count, 0); | |
260 | rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER); | |
261 | rte_atomic32_set(&synchro, 1); | |
262 | rte_eal_mp_wait_lcore(); | |
263 | rte_atomic32_set(&synchro, 0); | |
264 | ||
265 | if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) { | |
266 | printf("Atomic test and set failed\n"); | |
267 | return -1; | |
268 | } | |
269 | ||
270 | printf("add/sub and return\n"); | |
271 | ||
272 | rte_atomic64_set(&a64, 0); | |
273 | rte_atomic32_set(&a32, 0); | |
274 | rte_atomic16_set(&a16, 0); | |
275 | rte_atomic64_set(&count, 0); | |
276 | rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL, | |
277 | SKIP_MASTER); | |
278 | rte_atomic32_set(&synchro, 1); | |
279 | rte_eal_mp_wait_lcore(); | |
280 | rte_atomic32_set(&synchro, 0); | |
281 | ||
282 | if (rte_atomic64_read(&count) != 0) { | |
283 | printf("Atomic add/sub+return failed\n"); | |
284 | return -1; | |
285 | } | |
286 | ||
287 | /* | |
288 | * Set a64, a32 and a16 with the same value of minus "number of slave | |
289 | * lcores", launch all slave lcores to atomically increase by one and | |
290 | * test them respectively. | |
291 | * Each lcore should have only one chance to increase a64 by one and | |
292 | * then check if it is equal to 0, but there should be only one lcore | |
293 | * that finds that it is 0. It is similar for a32 and a16. | |
294 | * Then a variable of "count", initialized to zero, is increased by | |
295 | * one if a64, a32 or a16 is 0 after being increased and tested | |
296 | * atomically. | |
297 | * We can check if "count" is finally equal to 3 to see if all slave | |
298 | * lcores performed "atomic inc and test" right. | |
299 | */ | |
300 | printf("inc and test\n"); | |
301 | ||
302 | rte_atomic64_clear(&a64); | |
303 | rte_atomic32_clear(&a32); | |
304 | rte_atomic16_clear(&a16); | |
305 | rte_atomic32_clear(&synchro); | |
306 | rte_atomic64_clear(&count); | |
307 | ||
308 | rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count())); | |
309 | rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count())); | |
310 | rte_atomic16_set(&a16, (int16_t)(1 - (int16_t)rte_lcore_count())); | |
311 | rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MASTER); | |
312 | rte_atomic32_set(&synchro, 1); | |
313 | rte_eal_mp_wait_lcore(); | |
314 | rte_atomic32_clear(&synchro); | |
315 | ||
316 | if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) { | |
317 | printf("Atomic inc and test failed %d\n", (int)count.cnt); | |
318 | return -1; | |
319 | } | |
320 | ||
321 | /* | |
322 | * Same as above, but this time we set the values to "number of slave | |
323 | * lcores", and decrement instead of increment. | |
324 | */ | |
325 | printf("dec and test\n"); | |
326 | ||
327 | rte_atomic32_clear(&synchro); | |
328 | rte_atomic64_clear(&count); | |
329 | ||
330 | rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1)); | |
331 | rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1)); | |
332 | rte_atomic16_set(&a16, (int16_t)(rte_lcore_count() - 1)); | |
333 | rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MASTER); | |
334 | rte_atomic32_set(&synchro, 1); | |
335 | rte_eal_mp_wait_lcore(); | |
336 | rte_atomic32_clear(&synchro); | |
337 | ||
338 | if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) { | |
339 | printf("Atomic dec and test failed\n"); | |
340 | return -1; | |
341 | } | |
342 | ||
343 | return 0; | |
344 | } | |
345 | ||
346 | REGISTER_TEST_COMMAND(atomic_autotest, test_atomic); |