1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <rte_memory.h>
11 #include <rte_per_lcore.h>
12 #include <rte_launch.h>
13 #include <rte_atomic.h>
15 #include <rte_lcore.h>
23 * - The main test function performs three subtests. The first test
24 * checks that the usual inc/dec/add/sub functions are working
27 * - Initialize 16-bit, 32-bit and 64-bit atomic variables to specific
30 * - These variables are incremented and decremented on each core at
31 * the same time in ``test_atomic_usual()``.
33 * - The function checks that once all lcores finish their function,
34 * the value of the atomic variables are still the same.
36 * - The second test verifies the behavior of "test and set" functions.
38 * - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
40 * - Invoke ``test_atomic_tas()`` on each lcore: before doing anything
41 * else, the cores wait for a synchronization flag using ``while
42 * (rte_atomic32_read(&val) == 0)``, which is set by the main test
43 * function. Then all cores do a
44 * ``rte_atomicXX_test_and_set()`` at the same time. If it is successful,
45 * they increment another atomic counter.
47 * - The main function checks that the atomic counter was incremented
48 * exactly three times (once for the 16-bit, once for the 32-bit and
48 * once for the 64-bit variable).
50 * - Test "add/sub and return"
52 * - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
54 * - Invoke ``test_atomic_addsub_return()`` on each lcore. Before doing
55 * anything else, the cores wait for a synchronization flag. Each lcore does
56 * this operation several times::
58 * tmp = rte_atomicXX_add_return(&a, 1);
59 * atomic_add(&count, tmp);
60 * tmp = rte_atomicXX_sub_return(&a, 1);
61 * atomic_sub(&count, tmp+1);
63 * - At the end of the test, the *count* value must be 0.
/* Number of distinct atomic widths exercised by each subtest (16/32/64-bit). */
#define NUM_ATOMIC_TYPES 3

/* Shared atomic variables hammered concurrently by all worker lcores. */
static rte_atomic16_t a16;
static rte_atomic32_t a32;
static rte_atomic64_t a64;
/* Result accumulator inspected by the main lcore after each subtest. */
static rte_atomic64_t count;
/* Start gate: worker lcores spin until the main lcore sets this non-zero. */
static rte_atomic32_t synchro;
77 test_atomic_usual(__attribute__((unused
)) void *arg
)
81 while (rte_atomic32_read(&synchro
) == 0)
84 for (i
= 0; i
< N
; i
++)
85 rte_atomic16_inc(&a16
);
86 for (i
= 0; i
< N
; i
++)
87 rte_atomic16_dec(&a16
);
88 for (i
= 0; i
< (N
/ 5); i
++)
89 rte_atomic16_add(&a16
, 5);
90 for (i
= 0; i
< (N
/ 5); i
++)
91 rte_atomic16_sub(&a16
, 5);
93 for (i
= 0; i
< N
; i
++)
94 rte_atomic32_inc(&a32
);
95 for (i
= 0; i
< N
; i
++)
96 rte_atomic32_dec(&a32
);
97 for (i
= 0; i
< (N
/ 5); i
++)
98 rte_atomic32_add(&a32
, 5);
99 for (i
= 0; i
< (N
/ 5); i
++)
100 rte_atomic32_sub(&a32
, 5);
102 for (i
= 0; i
< N
; i
++)
103 rte_atomic64_inc(&a64
);
104 for (i
= 0; i
< N
; i
++)
105 rte_atomic64_dec(&a64
);
106 for (i
= 0; i
< (N
/ 5); i
++)
107 rte_atomic64_add(&a64
, 5);
108 for (i
= 0; i
< (N
/ 5); i
++)
109 rte_atomic64_sub(&a64
, 5);
115 test_atomic_tas(__attribute__((unused
)) void *arg
)
117 while (rte_atomic32_read(&synchro
) == 0)
120 if (rte_atomic16_test_and_set(&a16
))
121 rte_atomic64_inc(&count
);
122 if (rte_atomic32_test_and_set(&a32
))
123 rte_atomic64_inc(&count
);
124 if (rte_atomic64_test_and_set(&a64
))
125 rte_atomic64_inc(&count
);
131 test_atomic_addsub_and_return(__attribute__((unused
)) void *arg
)
138 while (rte_atomic32_read(&synchro
) == 0)
141 for (i
= 0; i
< N
; i
++) {
142 tmp16
= rte_atomic16_add_return(&a16
, 1);
143 rte_atomic64_add(&count
, tmp16
);
145 tmp16
= rte_atomic16_sub_return(&a16
, 1);
146 rte_atomic64_sub(&count
, tmp16
+1);
148 tmp32
= rte_atomic32_add_return(&a32
, 1);
149 rte_atomic64_add(&count
, tmp32
);
151 tmp32
= rte_atomic32_sub_return(&a32
, 1);
152 rte_atomic64_sub(&count
, tmp32
+1);
154 tmp64
= rte_atomic64_add_return(&a64
, 1);
155 rte_atomic64_add(&count
, tmp64
);
157 tmp64
= rte_atomic64_sub_return(&a64
, 1);
158 rte_atomic64_sub(&count
, tmp64
+1);
165 * rte_atomicXX_inc_and_test() atomically increases a 16-, 32- or 64-bit
166 * counter by one and then tests whether that counter is equal to 0. It
167 * returns true if the counter is 0 and false if the counter is not 0.
168 *
169 * This test checks whether each counter is equal to 0 after being atomically
170 * increased by one. If it is, the "count" variable is increased by one,
171 * which is checked as the result later.
175 test_atomic_inc_and_test(__attribute__((unused
)) void *arg
)
177 while (rte_atomic32_read(&synchro
) == 0)
180 if (rte_atomic16_inc_and_test(&a16
)) {
181 rte_atomic64_inc(&count
);
183 if (rte_atomic32_inc_and_test(&a32
)) {
184 rte_atomic64_inc(&count
);
186 if (rte_atomic64_inc_and_test(&a64
)) {
187 rte_atomic64_inc(&count
);
194 * rte_atomicXX_dec_and_test() atomically decreases a 16-, 32- or 64-bit
195 * counter by one and then tests whether that counter is equal to 0. It
196 * returns true if the counter is 0 and false if the counter is not 0.
197 * This test checks whether the counter is equal to 0 after being atomically
198 * decreased by one. If it is, the value of "count" is increased by one,
199 * which is checked as the result later.
202 test_atomic_dec_and_test(__attribute__((unused
)) void *arg
)
204 while (rte_atomic32_read(&synchro
) == 0)
207 if (rte_atomic16_dec_and_test(&a16
))
208 rte_atomic64_inc(&count
);
210 if (rte_atomic32_dec_and_test(&a32
))
211 rte_atomic64_inc(&count
);
213 if (rte_atomic64_dec_and_test(&a64
))
214 rte_atomic64_inc(&count
);
/*
 * Main test entry point.  Runs the five subtests in sequence; for each
 * one it seeds the shared atomics, launches the matching worker on
 * every slave lcore, releases the workers via the "synchro" start gate,
 * waits for them to finish, and checks the resulting values.
 *
 * Returns 0 on success, -1 on the first failing subtest.
 */
static int
test_atomic(void)
{
	/* Bring every shared variable to a known initial state. */
	rte_atomic16_init(&a16);
	rte_atomic32_init(&a32);
	rte_atomic64_init(&a64);
	rte_atomic64_init(&count);
	rte_atomic32_init(&synchro);

	/* Distinct non-zero seeds so corruption is detectable. */
	rte_atomic16_set(&a16, 1UL << 10);
	rte_atomic32_set(&a32, 1UL << 10);
	rte_atomic64_set(&a64, 1ULL << 33);

	printf("usual inc/dec/add/sub functions\n");

	/* Launch workers, then open the start gate and wait. */
	rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_set(&synchro, 0);

	/* The workers' operations cancel out: seeds must be intact. */
	if (rte_atomic16_read(&a16) != 1UL << 10) {
		printf("Atomic16 usual functions failed\n");
		return -1;
	}

	if (rte_atomic32_read(&a32) != 1UL << 10) {
		printf("Atomic32 usual functions failed\n");
		return -1;
	}

	if (rte_atomic64_read(&a64) != 1ULL << 33) {
		printf("Atomic64 usual functions failed\n");
		return -1;
	}

	printf("test and set\n");

	/* Zero the variables so every lcore races on the same TAS. */
	rte_atomic64_set(&a64, 0);
	rte_atomic32_set(&a32, 0);
	rte_atomic16_set(&a16, 0);
	rte_atomic64_set(&count, 0);
	rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_set(&synchro, 0);

	/* Exactly one winner per atomic width. */
	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
		printf("Atomic test and set failed\n");
		return -1;
	}

	printf("add/sub and return\n");

	rte_atomic64_set(&a64, 0);
	rte_atomic32_set(&a32, 0);
	rte_atomic16_set(&a16, 0);
	rte_atomic64_set(&count, 0);
	rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL,
				 SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_set(&synchro, 0);

	/* Every add/sub round contributes zero net to "count". */
	if (rte_atomic64_read(&count) != 0) {
		printf("Atomic add/sub+return failed\n");
		return -1;
	}

	/*
	 * Set a64, a32 and a16 to minus the "number of slave lcores",
	 * then launch all slave lcores to atomically increment and test
	 * each of them.
	 * Each lcore gets exactly one chance to increment a64 by one and
	 * check whether it became 0, and only one lcore can find that it
	 * is 0.  The same holds for a32 and a16.
	 * The "count" variable, initialized to zero, is incremented once
	 * for each variable observed at 0 after its increment, so a
	 * final value of NUM_ATOMIC_TYPES (3) shows that every slave
	 * lcore performed "atomic inc and test" correctly.
	 */
	printf("inc and test\n");

	rte_atomic64_clear(&a64);
	rte_atomic32_clear(&a32);
	rte_atomic16_clear(&a16);
	rte_atomic32_clear(&synchro);
	rte_atomic64_clear(&count);

	/* Seed so that the last increment lands exactly on zero. */
	rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count()));
	rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count()));
	rte_atomic16_set(&a16, (int16_t)(1 - (int16_t)rte_lcore_count()));
	rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_clear(&synchro);

	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
		printf("Atomic inc and test failed %d\n", (int)count.cnt);
		return -1;
	}

	/*
	 * Same as above, but this time the values are set to the "number
	 * of slave lcores" and decremented instead of incremented.
	 */
	printf("dec and test\n");

	rte_atomic32_clear(&synchro);
	rte_atomic64_clear(&count);

	rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1));
	rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1));
	rte_atomic16_set(&a16, (int16_t)(rte_lcore_count() - 1));
	rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_clear(&synchro);

	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
		printf("Atomic dec and test failed\n");
		return -1;
	}

	return 0;
}
/* Register with the test framework as the "atomic_autotest" command. */
REGISTER_TEST_COMMAND(atomic_autotest, test_atomic);