/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "test.h"

/*
 * Mempool
 * =======
 *
 * Basic tests: done on one core with and without cache:
 *
 * - Get one object, put one object
 * - Get two objects, put two objects
 * - Get all objects, test that their content is not modified and
 *   put them back in the pool.
 */
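/*
 * For reference, the single get/put round trip exercised below looks like
 * this (illustrative sketch only, not part of the test):
 *
 *	void *obj;
 *	if (rte_mempool_get(mp, &obj) == 0)
 *		rte_mempool_put(mp, obj);
 */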

#define MEMPOOL_ELT_SIZE 2048
#define MAX_KEEP 16
#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
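/*
 * MEMPOOL_SIZE leaves room for every lcore to hold MAX_KEEP objects plus a
 * full per-lcore cache, so the tests can still make progress while objects
 * remain cached on other cores.
 */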

#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
#define RET_ERR() do { \
	LOG_ERR(); \
	return -1; \
} while (0)
#define GOTO_ERR(var, label) do { \
	LOG_ERR(); \
	var = -1; \
	goto label; \
} while (0)

static rte_atomic32_t synchro;

/*
 * save the object number in the first 4 bytes of object data. All
 * other bytes are set to 0.
 */
static void
my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;

	memset(obj, 0, mp->elt_size);
	*objnum = i;
}

/* basic tests (done on one core) */
static int
test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
{
	uint32_t *objnum;
	void **objtable;
	void *obj, *obj2;
	char *obj_data;
	int ret = 0;
	unsigned i, j;
	int offset;
	struct rte_mempool_cache *cache;

	if (use_external_cache) {
		/* Create a user-owned mempool cache. */
		cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
						 SOCKET_ID_ANY);
		if (cache == NULL)
			RET_ERR();
	} else {
		/* May be NULL if cache is disabled. */
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	}

	/* dump the mempool status */
	rte_mempool_dump(stdout, mp);

	printf("get an object\n");
	if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0)
		GOTO_ERR(ret, out);
	rte_mempool_dump(stdout, mp);

	/* tests that improve coverage */
	printf("get object count\n");
	/* We have to count the extra caches, one in this case. */
	offset = use_external_cache ? 1 * cache->len : 0;
	if (rte_mempool_avail_count(mp) + offset != MEMPOOL_SIZE - 1)
		GOTO_ERR(ret, out);

	printf("get private data\n");
	if (rte_mempool_get_priv(mp) != (char *)mp +
			MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
		GOTO_ERR(ret, out);

#ifndef RTE_EXEC_ENV_FREEBSD /* rte_mem_virt2iova() not supported on bsd */
	printf("get physical address of an object\n");
	if (rte_mempool_virt2iova(obj) != rte_mem_virt2iova(obj))
		GOTO_ERR(ret, out);
#endif

	printf("put the object back\n");
	rte_mempool_generic_put(mp, &obj, 1, cache);
	rte_mempool_dump(stdout, mp);

	printf("get 2 objects\n");
	if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0)
		GOTO_ERR(ret, out);
	if (rte_mempool_generic_get(mp, &obj2, 1, cache) < 0) {
		rte_mempool_generic_put(mp, &obj, 1, cache);
		GOTO_ERR(ret, out);
	}
	rte_mempool_dump(stdout, mp);

	printf("put the objects back\n");
	rte_mempool_generic_put(mp, &obj, 1, cache);
	rte_mempool_generic_put(mp, &obj2, 1, cache);
	rte_mempool_dump(stdout, mp);

	/*
	 * get many objects: we cannot get them all because the cache
	 * on other cores may not be empty.
	 */
	objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
	if (objtable == NULL)
		GOTO_ERR(ret, out);

	for (i = 0; i < MEMPOOL_SIZE; i++) {
		if (rte_mempool_generic_get(mp, &objtable[i], 1, cache) < 0)
			break;
	}

	/*
	 * for each object, check that its content was not modified,
	 * and put objects back in pool
	 */
	while (i--) {
		obj = objtable[i];
		obj_data = obj;
		objnum = obj;
		if (*objnum > MEMPOOL_SIZE) {
			printf("bad object number (%u)\n", *objnum);
			ret = -1;
			break;
		}
		for (j = sizeof(*objnum); j < mp->elt_size; j++) {
			if (obj_data[j] != 0)
				ret = -1;
		}

		rte_mempool_generic_put(mp, &objtable[i], 1, cache);
	}

	free(objtable);
	if (ret == -1)
		printf("objects were modified!\n");

out:
	if (use_external_cache) {
		rte_mempool_cache_flush(cache, mp);
		rte_mempool_cache_free(cache);
	}

	return ret;
}

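/*
 * A cache size larger than RTE_MEMPOOL_CACHE_MAX_SIZE is invalid, so the
 * creation below must fail and return NULL.
 */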
static int test_mempool_creation_with_exceeded_cache_size(void)
{
	struct rte_mempool *mp_cov;

	mp_cov = rte_mempool_create("test_mempool_cache_too_big",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_cov != NULL) {
		rte_mempool_free(mp_cov);
		RET_ERR();
	}

	return 0;
}

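/*
 * Shared state for the single-producer/single-consumer test: a small
 * hand-off table, protected by a spinlock, in which the consumer lcore
 * parks objects taken from the pool and from which the producer lcore
 * returns them.
 */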
static struct rte_mempool *mp_spsc;
static rte_spinlock_t scsp_spinlock;
static void *scsp_obj_table[MAX_KEEP];

/*
 * single producer function: take objects parked in scsp_obj_table by the
 * consumer and return them to the mempool
 */
static int test_mempool_single_producer(void)
{
	unsigned int i;
	void *obj = NULL;
	uint64_t start_cycles, end_cycles;
	uint64_t duration = rte_get_timer_hz() / 4;

	start_cycles = rte_get_timer_cycles();
	while (1) {
		end_cycles = rte_get_timer_cycles();
		/* duration has elapsed, stop producing */
		if (start_cycles + duration < end_cycles)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		for (i = 0; i < MAX_KEEP; i++) {
			if (NULL != scsp_obj_table[i]) {
				obj = scsp_obj_table[i];
				break;
			}
		}
		rte_spinlock_unlock(&scsp_spinlock);
		if (i >= MAX_KEEP) {
			continue;
		}
		if (rte_mempool_from_obj(obj) != mp_spsc) {
			printf("obj not owned by this mempool\n");
			RET_ERR();
		}
		rte_mempool_put(mp_spsc, obj);
		rte_spinlock_lock(&scsp_spinlock);
		scsp_obj_table[i] = NULL;
		rte_spinlock_unlock(&scsp_spinlock);
	}

	return 0;
}

/*
 * single consumer function: get objects from the mempool and park them in
 * free slots of scsp_obj_table
 */
static int test_mempool_single_consumer(void)
{
	unsigned int i;
	void *obj;
	uint64_t start_cycles, end_cycles;
	uint64_t duration = rte_get_timer_hz() / 8;

	start_cycles = rte_get_timer_cycles();
	while (1) {
		end_cycles = rte_get_timer_cycles();
		/* duration has elapsed, stop consuming */
		if (start_cycles + duration < end_cycles)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		for (i = 0; i < MAX_KEEP; i++) {
			if (NULL == scsp_obj_table[i])
				break;
		}
		rte_spinlock_unlock(&scsp_spinlock);
		if (i >= MAX_KEEP)
			continue;
		if (rte_mempool_get(mp_spsc, &obj) < 0)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		scsp_obj_table[i] = obj;
		rte_spinlock_unlock(&scsp_spinlock);
	}

	return 0;
}

/*
 * launch function for the single-consumer side of the single producer /
 * single consumer test; runs on one remote lcore only
 */
static int
test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
{
	return test_mempool_single_consumer();
}

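/* mempool init callback passed to rte_mempool_create(): just print the name
 * of the pool being initialized */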
static void
my_mp_init(struct rte_mempool *mp, __attribute__((unused)) void *arg)
{
	printf("mempool name is %s\n", mp->name);
	/* nothing to be implemented here */
	return;
}

/*
 * test the mempool operations based on a single producer and a single consumer
 */
static int
test_mempool_sp_sc(void)
{
	int ret = 0;
	unsigned lcore_id = rte_lcore_id();
	unsigned lcore_next;

	/* create a mempool with single producer/consumer ring */
	if (mp_spsc == NULL) {
		mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE,
			MEMPOOL_ELT_SIZE, 0, 0,
			my_mp_init, NULL,
			my_obj_init, NULL,
			SOCKET_ID_ANY,
			MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT |
			MEMPOOL_F_SC_GET);
		if (mp_spsc == NULL)
			RET_ERR();
	}
	if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
		printf("Cannot lookup mempool from its name\n");
		ret = -1;
		goto err;
	}
	lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
	if (lcore_next >= RTE_MAX_LCORE) {
		ret = -1;
		goto err;
	}
	if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) {
		ret = -1;
		goto err;
	}
	rte_spinlock_init(&scsp_spinlock);
	memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
	rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL,
		lcore_next);
	if (test_mempool_single_producer() < 0)
		ret = -1;

	if (rte_eal_wait_lcore(lcore_next) < 0)
		ret = -1;

err:
	rte_mempool_free(mp_spsc);
	mp_spsc = NULL;

	return ret;
}

/*
 * some more basic tests of the mempool
 */
static int
test_mempool_basic_ex(struct rte_mempool *mp)
{
	unsigned i;
	void **obj;
	void *err_obj;
	int ret = -1;

	if (mp == NULL)
		return ret;

	obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE,
		sizeof(void *), 0);
	if (obj == NULL) {
		printf("test_mempool_basic_ex failed to allocate object table with rte_calloc\n");
		return ret;
	}
	printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n",
		mp->name, rte_mempool_avail_count(mp));
	if (rte_mempool_full(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be full\n");
		goto fail_mp_basic_ex;
	}

	for (i = 0; i < MEMPOOL_SIZE; i++) {
		if (rte_mempool_get(mp, &obj[i]) < 0) {
			printf("test_mp_basic_ex failed to get object for [%u]\n",
				i);
			goto fail_mp_basic_ex;
		}
	}
	if (rte_mempool_get(mp, &err_obj) == 0) {
		printf("test_mempool_basic_ex get an impossible obj\n");
		goto fail_mp_basic_ex;
	}
	printf("number: %u\n", i);
	if (rte_mempool_empty(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be empty\n");
		goto fail_mp_basic_ex;
	}

	for (i = 0; i < MEMPOOL_SIZE; i++)
		rte_mempool_put(mp, obj[i]);

	if (rte_mempool_full(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be full\n");
		goto fail_mp_basic_ex;
	}

	ret = 0;

fail_mp_basic_ex:
	if (obj != NULL)
		rte_free((void *)obj);

	return ret;
}

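/*
 * Two mempools may not share a name: the second create with the same name
 * must fail and return NULL.
 */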
static int
test_mempool_same_name_twice_creation(void)
{
	struct rte_mempool *mp_tc, *mp_tc2;

	mp_tc = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		NULL, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_tc == NULL)
		RET_ERR();

	mp_tc2 = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		NULL, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_tc2 != NULL) {
		rte_mempool_free(mp_tc);
		rte_mempool_free(mp_tc2);
		RET_ERR();
	}

	rte_mempool_free(mp_tc);
	return 0;
}

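/* rte_mempool_walk() callback: print the name of each mempool */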
static void
walk_cb(struct rte_mempool *mp, void *userdata __rte_unused)
{
	printf("\t%s\n", mp->name);
}

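/*
 * top-level test: create mempools with the different handlers and run the
 * tests above on each of them
 */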
static int
test_mempool(void)
{
	int ret = -1;
	struct rte_mempool *mp_cache = NULL;
	struct rte_mempool *mp_nocache = NULL;
	struct rte_mempool *mp_stack = NULL;
	struct rte_mempool *default_pool = NULL;
	const char *default_pool_ops = rte_mbuf_best_mempool_ops();

	rte_atomic32_init(&synchro);

	/* create a mempool (without cache) */
	mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_nocache == NULL) {
		printf("cannot allocate mp_nocache mempool\n");
		goto err;
	}

	/* create a mempool (with cache) */
	mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_cache == NULL) {
		printf("cannot allocate mp_cache mempool\n");
		goto err;
	}

	/* create a mempool with an external handler */
	mp_stack = rte_mempool_create_empty("test_stack",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		SOCKET_ID_ANY, 0);

	if (mp_stack == NULL) {
		printf("cannot allocate mp_stack mempool\n");
		goto err;
	}
	if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
		printf("cannot set stack handler\n");
		goto err;
	}
	if (rte_mempool_populate_default(mp_stack) < 0) {
		printf("cannot populate mp_stack mempool\n");
		goto err;
	}
	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);

	/* create a mempool based on the default handler */
	printf("Testing %s mempool handler\n", default_pool_ops);
	default_pool = rte_mempool_create_empty("default_pool",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		SOCKET_ID_ANY, 0);

	if (default_pool == NULL) {
		printf("cannot allocate default mempool\n");
		goto err;
	}
	if (rte_mempool_set_ops_byname(default_pool,
			default_pool_ops, NULL) < 0) {
		printf("cannot set %s handler\n", default_pool_ops);
		goto err;
	}
	if (rte_mempool_populate_default(default_pool) < 0) {
		printf("cannot populate %s mempool\n", default_pool_ops);
		goto err;
	}
	rte_mempool_obj_iter(default_pool, my_obj_init, NULL);

	/* retrieve the mempool from its name */
	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
		printf("Cannot lookup mempool from its name\n");
		goto err;
	}

	printf("Walk through all mempools:\n");
	rte_mempool_walk(walk_cb, NULL);

	rte_mempool_list_dump(stdout);

	/* basic tests without cache */
	if (test_mempool_basic(mp_nocache, 0) < 0)
		goto err;

	/* basic tests with cache */
	if (test_mempool_basic(mp_cache, 0) < 0)
		goto err;

	/* basic tests with user-owned cache */
	if (test_mempool_basic(mp_nocache, 1) < 0)
		goto err;

	/* more basic tests without cache */
	if (test_mempool_basic_ex(mp_nocache) < 0)
		goto err;

	/* mempool operation test based on single producer and single consumer */
	if (test_mempool_sp_sc() < 0)
		goto err;

	if (test_mempool_creation_with_exceeded_cache_size() < 0)
		goto err;

	if (test_mempool_same_name_twice_creation() < 0)
		goto err;

	/* test the stack handler */
	if (test_mempool_basic(mp_stack, 1) < 0)
		goto err;

	if (test_mempool_basic(default_pool, 1) < 0)
		goto err;

	rte_mempool_list_dump(stdout);

	ret = 0;

err:
	rte_mempool_free(mp_nocache);
	rte_mempool_free(mp_cache);
	rte_mempool_free(mp_stack);
	rte_mempool_free(default_pool);

	return ret;
}

REGISTER_TEST_COMMAND(mempool_autotest, test_mempool);