/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_eal.h>
#include <rte_lcore.h>

#include "test.h"

/*
 * Atomic Variables
 * ================
 *
 * - The main test function performs several subtests. The first one
 *   checks that the usual inc/dec/add/sub functions are working
 *   correctly:
 *
 *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to specific
 *     values.
 *
 *   - These variables are incremented and decremented on each lcore at
 *     the same time in ``test_atomic_usual()``.
 *
 *   - The function checks that once all lcores have finished, the values
 *     of the atomic variables are unchanged.
 *
 * - The second test verifies the behavior of the "test and set" functions.
 *
 *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
 *
 *   - Invoke ``test_atomic_tas()`` on each lcore. Before doing anything
 *     else, the lcores wait on a synchronization flag using ``while
 *     (rte_atomic32_read(&synchro) == 0)``, which is released by the main
 *     test function. Then all lcores call ``rte_atomicXX_test_and_set()``
 *     at the same time; each successful call increments another atomic
 *     counter.
 *
 *   - The main function checks that the atomic counter was incremented
 *     exactly three times (once for the 16-bit, once for the 32-bit and
 *     once for the 64-bit variable).
 *
 * - The third test covers "add/sub and return".
 *
 *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
 *
 *   - Invoke ``test_atomic_addsub_and_return()`` on each lcore. Before
 *     doing anything else, the lcores wait on the synchronization flag.
 *     Each lcore then performs this operation several times::
 *
 *       tmp = rte_atomicXX_add_return(&a, 1);
 *       rte_atomic64_add(&count, tmp);
 *       tmp = rte_atomicXX_sub_return(&a, 1);
 *       rte_atomic64_sub(&count, tmp+1);
 *
 *   - At the end of the test, the *count* value must be 0.
 */
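
/*
 * All worker functions below share the same start barrier: each worker
 * lcore spins on the ``synchro`` flag until the main lcore, having
 * launched everyone with rte_eal_mp_remote_launch(), sets it to 1. This
 * way all lcores start operating on the atomic variables at (roughly)
 * the same time, which is what gives the test its concurrency.
 */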

#define NUM_ATOMIC_TYPES 3

#define N 10000

static rte_atomic16_t a16;
static rte_atomic32_t a32;
static rte_atomic64_t a64;
static rte_atomic64_t count;   /* accumulates per-test results */
static rte_atomic32_t synchro; /* start flag that the worker lcores spin on */

static int
test_atomic_usual(__attribute__((unused)) void *arg)
{
	unsigned i;

	while (rte_atomic32_read(&synchro) == 0)
		;

	for (i = 0; i < N; i++)
		rte_atomic16_inc(&a16);
	for (i = 0; i < N; i++)
		rte_atomic16_dec(&a16);
	for (i = 0; i < (N / 5); i++)
		rte_atomic16_add(&a16, 5);
	for (i = 0; i < (N / 5); i++)
		rte_atomic16_sub(&a16, 5);

	for (i = 0; i < N; i++)
		rte_atomic32_inc(&a32);
	for (i = 0; i < N; i++)
		rte_atomic32_dec(&a32);
	for (i = 0; i < (N / 5); i++)
		rte_atomic32_add(&a32, 5);
	for (i = 0; i < (N / 5); i++)
		rte_atomic32_sub(&a32, 5);

	for (i = 0; i < N; i++)
		rte_atomic64_inc(&a64);
	for (i = 0; i < N; i++)
		rte_atomic64_dec(&a64);
	for (i = 0; i < (N / 5); i++)
		rte_atomic64_add(&a64, 5);
	for (i = 0; i < (N / 5); i++)
		rte_atomic64_sub(&a64, 5);

	return 0;
}
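
/*
 * For contrast, a non-atomic read-modify-write such as
 *
 *	for (i = 0; i < N; i++)
 *		a16.cnt++;	// separate load, add and store
 *
 * (an illustrative sketch, not part of the test) could lose updates when
 * lcores interleave, so the counter would not reliably return to its
 * initial value; the rte_atomicXX_*() calls above avoid exactly that.
 */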

static int
test_atomic_tas(__attribute__((unused)) void *arg)
{
	while (rte_atomic32_read(&synchro) == 0)
		;

	if (rte_atomic16_test_and_set(&a16))
		rte_atomic64_inc(&count);
	if (rte_atomic32_test_and_set(&a32))
		rte_atomic64_inc(&count);
	if (rte_atomic64_test_and_set(&a64))
		rte_atomic64_inc(&count);

	return 0;
}
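
/*
 * What the check above relies on: rte_atomicXX_test_and_set() atomically
 * sets the counter to 1 only if it is currently 0 and returns nonzero on
 * success. In DPDK's generic implementation this is (roughly) a
 * compare-and-set, e.g. for the 16-bit variant:
 *
 *	return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
 *
 * so with every lcore racing on the same variable, exactly one call can
 * succeed per variable and ``count`` ends up equal to NUM_ATOMIC_TYPES.
 */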

static int
test_atomic_addsub_and_return(__attribute__((unused)) void *arg)
{
	uint32_t tmp16;
	uint32_t tmp32;
	uint64_t tmp64;
	unsigned i;

	while (rte_atomic32_read(&synchro) == 0)
		;

	for (i = 0; i < N; i++) {
		tmp16 = rte_atomic16_add_return(&a16, 1);
		rte_atomic64_add(&count, tmp16);

		tmp16 = rte_atomic16_sub_return(&a16, 1);
		rte_atomic64_sub(&count, tmp16+1);

		tmp32 = rte_atomic32_add_return(&a32, 1);
		rte_atomic64_add(&count, tmp32);

		tmp32 = rte_atomic32_sub_return(&a32, 1);
		rte_atomic64_sub(&count, tmp32+1);

		tmp64 = rte_atomic64_add_return(&a64, 1);
		rte_atomic64_add(&count, tmp64);

		tmp64 = rte_atomic64_sub_return(&a64, 1);
		rte_atomic64_sub(&count, tmp64+1);
	}

	return 0;
}
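
/*
 * Why ``count`` must end at 0: each add_return() adds its post-increment
 * value to count, and each sub_return() removes its pre-decrement value
 * (the returned post-decrement value plus 1). Over the whole run every
 * variable performs equal numbers of +1 and -1 steps and starts and ends
 * at the same value, so these contributions cancel exactly, regardless
 * of how the lcores interleave.
 */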

/*
 * rte_atomicXX_inc_and_test() atomically increments a 16-, 32- or 64-bit
 * counter by one and then tests whether the result is 0. It returns true
 * if the counter is 0 and false otherwise.
 * This test checks whether each counter equals 0 after being atomically
 * incremented. If it does, the "count" variable is incremented by one,
 * and count is checked as the overall result later.
 */
static int
test_atomic_inc_and_test(__attribute__((unused)) void *arg)
{
	while (rte_atomic32_read(&synchro) == 0)
		;

	if (rte_atomic16_inc_and_test(&a16)) {
		rte_atomic64_inc(&count);
	}
	if (rte_atomic32_inc_and_test(&a32)) {
		rte_atomic64_inc(&count);
	}
	if (rte_atomic64_inc_and_test(&a64)) {
		rte_atomic64_inc(&count);
	}

	return 0;
}
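
/*
 * A sketch of what inc_and_test() boils down to with GCC builtins (an
 * assumption about DPDK's generic implementation, not taken from this
 * file):
 *
 *	return __sync_add_and_fetch(&v->cnt, 1) == 0;
 *
 * The increment and the zero check happen as one atomic step, which is
 * why at most one lcore can observe the transition to 0.
 */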

/*
 * rte_atomicXX_dec_and_test() atomically decrements a 16-, 32- or 64-bit
 * counter by one and then tests whether the result is 0. It returns true
 * if the counter is 0 and false otherwise.
 * This test checks whether each counter equals 0 after being atomically
 * decremented. If it does, the "count" variable is incremented by one,
 * and count is checked as the overall result later.
 */
static int
test_atomic_dec_and_test(__attribute__((unused)) void *arg)
{
	while (rte_atomic32_read(&synchro) == 0)
		;

	if (rte_atomic16_dec_and_test(&a16))
		rte_atomic64_inc(&count);

	if (rte_atomic32_dec_and_test(&a32))
		rte_atomic64_inc(&count);

	if (rte_atomic64_dec_and_test(&a64))
		rte_atomic64_inc(&count);

	return 0;
}

static int
test_atomic(void)
{
	rte_atomic16_init(&a16);
	rte_atomic32_init(&a32);
	rte_atomic64_init(&a64);
	rte_atomic64_init(&count);
	rte_atomic32_init(&synchro);

	rte_atomic16_set(&a16, 1UL << 10);
	rte_atomic32_set(&a32, 1UL << 10);
	rte_atomic64_set(&a64, 1ULL << 33);

	printf("usual inc/dec/add/sub functions\n");

	rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_set(&synchro, 0);

	if (rte_atomic16_read(&a16) != 1UL << 10) {
		printf("Atomic16 usual functions failed\n");
		return -1;
	}

	if (rte_atomic32_read(&a32) != 1UL << 10) {
		printf("Atomic32 usual functions failed\n");
		return -1;
	}

	if (rte_atomic64_read(&a64) != 1ULL << 33) {
		printf("Atomic64 usual functions failed\n");
		return -1;
	}

	printf("test and set\n");

	rte_atomic64_set(&a64, 0);
	rte_atomic32_set(&a32, 0);
	rte_atomic16_set(&a16, 0);
	rte_atomic64_set(&count, 0);
	rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_set(&synchro, 0);

	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
		printf("Atomic test and set failed\n");
		return -1;
	}

	printf("add/sub and return\n");

	rte_atomic64_set(&a64, 0);
	rte_atomic32_set(&a32, 0);
	rte_atomic16_set(&a16, 0);
	rte_atomic64_set(&count, 0);
	rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL,
				 SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_set(&synchro, 0);

	if (rte_atomic64_read(&count) != 0) {
		printf("Atomic add/sub+return failed\n");
		return -1;
	}

	/*
	 * Set a64, a32 and a16 to minus the number of slave lcores, then
	 * launch all slave lcores to atomically increment and test them.
	 * Each lcore increments each variable exactly once, so exactly one
	 * lcore observes the transition to 0 per variable. For example, with
	 * one master and three slaves, each variable starts at -3 and only
	 * the slave performing the third increment sees 0.
	 * The "count" variable, initialized to zero, is incremented once for
	 * each variable that is observed to be 0 after the atomic
	 * inc-and-test, so a final value of 3 shows that all slave lcores
	 * performed the operation correctly.
	 */
	printf("inc and test\n");

	rte_atomic64_clear(&a64);
	rte_atomic32_clear(&a32);
	rte_atomic16_clear(&a16);
	rte_atomic32_clear(&synchro);
	rte_atomic64_clear(&count);

	rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count()));
	rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count()));
	rte_atomic16_set(&a16, (int16_t)(1 - (int16_t)rte_lcore_count()));
	rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_clear(&synchro);

	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
		printf("Atomic inc and test failed %d\n", (int)count.cnt);
		return -1;
	}

	/*
	 * Same as above, but this time we set the values to "number of slave
	 * lcores", and decrement instead of increment.
	 */
	printf("dec and test\n");

	rte_atomic32_clear(&synchro);
	rte_atomic64_clear(&count);

	rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1));
	rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1));
	rte_atomic16_set(&a16, (int16_t)(rte_lcore_count() - 1));
	rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MASTER);
	rte_atomic32_set(&synchro, 1);
	rte_eal_mp_wait_lcore();
	rte_atomic32_clear(&synchro);

	if (rte_atomic64_read(&count) != NUM_ATOMIC_TYPES) {
		printf("Atomic dec and test failed\n");
		return -1;
	}

	return 0;
}

REGISTER_TEST_COMMAND(atomic_autotest, test_atomic);