/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIBRTE_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIBRTE_HASH */

#ifdef RTE_LIBRTE_LPM
#include <rte_lpm.h>
#endif /* RTE_LIBRTE_LPM */

#include <rte_string_fns.h>

#include "test.h"

typedef int (*case_func_t)(void *arg);
typedef void (*case_clean_t)(unsigned lcore_id);

#define MAX_STRING_SIZE     (256)
#define MAX_ITER_TIMES      (16)
#define MAX_LPM_ITER_TIMES  (8)

#define MEMPOOL_ELT_SIZE    (sizeof(uint32_t))
#define MEMPOOL_SIZE        (4)

/* cap on participating lcores, keeping the memzones the iterations can
 * allocate within the RTE_MAX_MEMZONE table */
#define MAX_LCORES          (RTE_MAX_MEMZONE / (MAX_ITER_TIMES * 4U))

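/*
 * obj_count counts how many lcores succeed in creating the shared
 * "fr_test_once" object; launch_test() checks that exactly one creation
 * succeeded. synchro is the start flag the master lcore raises to release
 * the busy-waiting slave lcores (see WAIT_SYNCHRO_FOR_SLAVES below).
 */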
static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);

#define WAIT_SYNCHRO_FOR_SLAVES()   do { \
	if (lcore_self != rte_get_master_lcore()) \
		while (rte_atomic32_read(&synchro) == 0); \
} while(0)

/*
 * rte_eal_init() may only be called once per process
 */
static int
test_eal_init_once(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_SLAVES();

	rte_atomic32_set(&obj_count, 1); /* silence the check in the caller */
	if (rte_eal_init(0, NULL) != -1)
		return -1;

	return 0;
}

/*
 * ring create/lookup reentrancy test
 */
static int
ring_create_lookup(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if (rp != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create/lookup a new ring several times */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (NULL == rp)
			return -1;
		if (rte_ring_lookup(ring_name) != rp)
			return -1;
	}

	/* verify that all rings were created successfully */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		if (rte_ring_lookup(ring_name) == NULL)
			return -1;
	}

	return 0;
}
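
/*
 * Hedged sketch, not part of the original test: a cleanup helper for the
 * per-lcore rings created above, mirroring hash_clean()/fbk_clean() below.
 * It assumes rte_ring_free() is available (DPDK >= 17.05); the suite does
 * not register it in test_cases[], so the rings are otherwise left allocated.
 */
static __attribute__((unused)) void
ring_clean(unsigned lcore_id)
{
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d",
			 lcore_id, i);
		/* look up each ring this lcore created and free it */
		rp = rte_ring_lookup(ring_name);
		if (rp != NULL)
			rte_ring_free(rp);
	}
}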

/* per-object constructor passed to rte_mempool_create(): zero the
 * element and store its index in it */
static void
my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;
	memset(obj, 0, mp->elt_size);
	*objnum = i;
}

static int
mempool_create_lookup(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create/lookup a new mempool several times */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (NULL == mp)
			return -1;
		if (rte_mempool_lookup(mempool_name) != mp)
			return -1;
	}

	/* verify that all mempools were created successfully */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		if (rte_mempool_lookup(mempool_name) == NULL)
			return -1;
	}

	return 0;
}
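
/*
 * Hedged sketch, not part of the original test: a matching cleanup helper
 * for the per-lcore mempools created above. It assumes rte_mempool_free()
 * is available (DPDK >= 16.07); not registered in test_cases[].
 */
static __attribute__((unused)) void
mempool_clean(unsigned lcore_id)
{
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
			 lcore_id, i);
		/* look up each mempool this lcore created and free it */
		mp = rte_mempool_lookup(mempool_name);
		if (mp != NULL)
			rte_mempool_free(mp);
	}
}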

#ifdef RTE_LIBRTE_HASH
static void
hash_clean(unsigned lcore_id)
{
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;
	int i;

	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);
	}
}

static int
hash_create_free(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];
	int i;
	struct rte_hash_parameters hash_params = {
		.name = NULL,
		.entries = 16,
		.key_len = 4,
		.hash_func = (rte_hash_function)rte_jhash_32b,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		handle = rte_hash_create(&hash_params);
		if (handle != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple hashes simultaneously */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);
		if (NULL == handle)
			return -1;

		/* verify that the hash can be found, then free it */
		if (handle != rte_hash_find_existing(hash_name))
			return -1;

		rte_hash_free(handle);
	}

	/* verify that all hashes were freed */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);

		if (NULL != rte_hash_find_existing(hash_name))
			return -1;
	}

	return 0;
}

static void
fbk_clean(unsigned lcore_id)
{
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;
	int i;

	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);
	}
}

static int
fbk_create_free(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];
	int i;
	struct rte_fbk_hash_params fbk_params = {
		.name = NULL,
		.entries = 4,
		.entries_per_bucket = 4,
		.socket_id = 0,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
	};

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if (handle != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);
		if (NULL == handle)
			return -1;

		/* verify that the table can be found, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))
			return -1;

		rte_fbk_hash_free(handle);
	}

	/* verify that all tables were freed */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);

		if (NULL != rte_fbk_hash_find_existing(fbk_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIBRTE_HASH */

#ifdef RTE_LIBRTE_LPM
static void
lpm_clean(unsigned lcore_id)
{
	char lpm_name[MAX_STRING_SIZE];
	struct rte_lpm *lpm;
	int i;

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
			rte_lpm_free(lpm);
	}
}

static int
lpm_create_free(__attribute__((unused)) void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_lpm *lpm;
	struct rte_lpm_config config;
	char lpm_name[MAX_STRING_SIZE];
	int i;

	config.max_rules = 4;
	config.number_tbl8s = 256;
	config.flags = 0;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same lpm simultaneously on all threads */
	for (i = 0; i < MAX_ITER_TIMES; i++) {
		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
		if (lpm != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple lpm tables simultaneously */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
		if (NULL == lpm)
			return -1;

		/* verify that the table can be found, then free it */
		if (lpm != rte_lpm_find_existing(lpm_name))
			return -1;

		rte_lpm_free(lpm);
	}

	/* verify that all tables were freed */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		if (NULL != rte_lpm_find_existing(lpm_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIBRTE_LPM */

struct test_case {
	case_func_t func;
	void *arg;
	case_clean_t clean;
	char name[MAX_STRING_SIZE];
};

/* All test cases in the test suite */
struct test_case test_cases[] = {
	{ test_eal_init_once, NULL, NULL, "eal init once" },
	{ ring_create_lookup, NULL, NULL, "ring create/lookup" },
	{ mempool_create_lookup, NULL, NULL, "mempool create/lookup" },
#ifdef RTE_LIBRTE_HASH
	{ hash_create_free, NULL, hash_clean, "hash create/free" },
	{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
#endif /* RTE_LIBRTE_HASH */
#ifdef RTE_LIBRTE_LPM
	{ lpm_create_free, NULL, lpm_clean, "lpm create/free" },
#endif /* RTE_LIBRTE_LPM */
};
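
/*
 * Extending the suite: implement a new case_func_t (plus an optional
 * case_clean_t that releases whatever per-lcore objects the case leaves
 * behind) and append an entry to test_cases[] above, e.g. with
 * hypothetical names:
 *
 *	{ timer_create_free, NULL, timer_clean, "timer create/free" },
 */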

/**
 * launch the test case on the master lcore and on all slave lcores
 */
static int
launch_test(struct test_case *pt_case)
{
	int ret = 0;
	unsigned lcore_id;
	unsigned cores_save = rte_lcore_count();
	unsigned cores = RTE_MIN(cores_save, MAX_LCORES);
	unsigned count;

	if (pt_case->func == NULL)
		return -1;

	rte_atomic32_set(&obj_count, 0);
	rte_atomic32_set(&synchro, 0);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
	}

	rte_atomic32_set(&synchro, 1);

	if (pt_case->func(pt_case->arg) < 0)
		ret = -1;

	cores = cores_save;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;

		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
	}

	count = rte_atomic32_read(&obj_count);
	if (count != 1) {
		printf("%s: common object allocated %d times (should be 1)\n",
			pt_case->name, count);
		ret = -1;
	}

	return ret;
}
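
/*
 * Note: launch_test() runs pt_case->func on the master lcore as well, so
 * every participating lcore races through the same create/lookup sequence.
 * The clean callbacks run on the master, once per joined slave lcore id,
 * so they must not assume they execute on the lcore they clean up after.
 */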

/**
 * Main entry of func_reentrancy test
 */
static int
test_func_reentrancy(void)
{
	uint32_t case_id;
	struct test_case *pt_case = NULL;

	if (rte_lcore_count() <= 1) {
		printf("Not enough lcores for testing\n");
		return -1;
	}
	else if (rte_lcore_count() > MAX_LCORES)
		printf("Too many lcores, some cores will be disabled\n");

	for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)
			continue;

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
			return -1;
		}
		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
	}

	return 0;
}

REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);
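
/*
 * Usage sketch (assuming the standard DPDK test harness): start the test
 * binary with EAL arguments and at least two lcores, then invoke the
 * command registered above at the interactive prompt, e.g.:
 *
 *	./test -c 0xf -n 4
 *	RTE>> func_reentrancy_autotest
 */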