/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <string.h>

#include <rte_atomic.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_stack.h>

#include "test.h"

#define STACK_SIZE 4096
#define MAX_BULK 32
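
/* Fill the stack with bulk_sz-sized pushes, verifying the used and free
 * counts after each operation, then drain it the same way and check that
 * the objects come back in LIFO order.
 */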
static int
test_stack_push_pop(struct rte_stack *s, void **obj_table, unsigned int bulk_sz)
{
	unsigned int i, ret;
	void **popped_objs;

	popped_objs = rte_calloc(NULL, STACK_SIZE, sizeof(void *), 0);
	if (popped_objs == NULL) {
		printf("[%s():%u] failed to calloc %zu bytes\n",
		       __func__, __LINE__, STACK_SIZE * sizeof(void *));
		return -1;
	}

	for (i = 0; i < STACK_SIZE; i += bulk_sz) {
		ret = rte_stack_push(s, &obj_table[i], bulk_sz);

		if (ret != bulk_sz) {
			printf("[%s():%u] push returned: %u (expected %u)\n",
			       __func__, __LINE__, ret, bulk_sz);
			rte_free(popped_objs);
			return -1;
		}

		if (rte_stack_count(s) != i + bulk_sz) {
			printf("[%s():%u] stack count: %u (expected %u)\n",
			       __func__, __LINE__, rte_stack_count(s),
			       i + bulk_sz);
			rte_free(popped_objs);
			return -1;
		}

		if (rte_stack_free_count(s) != STACK_SIZE - i - bulk_sz) {
			printf("[%s():%u] stack free count: %u (expected %u)\n",
			       __func__, __LINE__, rte_stack_free_count(s),
			       STACK_SIZE - i - bulk_sz);
			rte_free(popped_objs);
			return -1;
		}
	}

	for (i = 0; i < STACK_SIZE; i += bulk_sz) {
		ret = rte_stack_pop(s, &popped_objs[i], bulk_sz);

		if (ret != bulk_sz) {
			printf("[%s():%u] pop returned: %u (expected %u)\n",
			       __func__, __LINE__, ret, bulk_sz);
			rte_free(popped_objs);
			return -1;
		}

		if (rte_stack_count(s) != STACK_SIZE - i - bulk_sz) {
			printf("[%s():%u] stack count: %u (expected %u)\n",
			       __func__, __LINE__, rte_stack_count(s),
			       STACK_SIZE - i - bulk_sz);
			rte_free(popped_objs);
			return -1;
		}

		if (rte_stack_free_count(s) != i + bulk_sz) {
			printf("[%s():%u] stack free count: %u (expected %u)\n",
			       __func__, __LINE__, rte_stack_free_count(s),
			       i + bulk_sz);
			rte_free(popped_objs);
			return -1;
		}
	}

	for (i = 0; i < STACK_SIZE; i++) {
		if (obj_table[i] != popped_objs[STACK_SIZE - i - 1]) {
			printf("[%s():%u] Incorrect value %p at index 0x%x\n",
			       __func__, __LINE__,
			       popped_objs[STACK_SIZE - i - 1], i);
			rte_free(popped_objs);
			return -1;
		}
	}

	rte_free(popped_objs);

	return 0;
}
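
/* Single-lcore sanity checks: create/lookup, used and free counts on an
 * empty stack, single and bulk push/pop, rejection of an oversized push,
 * and pop from an empty stack.
 */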
static int
test_stack_basic(uint32_t flags)
{
	struct rte_stack *s = NULL;
	void **obj_table = NULL;
	int i, ret = -1;

	obj_table = rte_calloc(NULL, STACK_SIZE, sizeof(void *), 0);
	if (obj_table == NULL) {
		printf("[%s():%u] failed to calloc %zu bytes\n",
		       __func__, __LINE__, STACK_SIZE * sizeof(void *));
		goto fail_test;
	}

	for (i = 0; i < STACK_SIZE; i++)
		obj_table[i] = (void *)(uintptr_t)i;

	s = rte_stack_create(__func__, STACK_SIZE, rte_socket_id(), flags);
	if (s == NULL) {
		printf("[%s():%u] failed to create a stack\n",
		       __func__, __LINE__);
		goto fail_test;
	}

	if (rte_stack_lookup(__func__) != s) {
		printf("[%s():%u] failed to lookup a stack\n",
		       __func__, __LINE__);
		goto fail_test;
	}

	if (rte_stack_count(s) != 0) {
		printf("[%s():%u] stack count: %u (expected 0)\n",
		       __func__, __LINE__, rte_stack_count(s));
		goto fail_test;
	}

	if (rte_stack_free_count(s) != STACK_SIZE) {
		printf("[%s():%u] stack free count: %u (expected %u)\n",
		       __func__, __LINE__, rte_stack_free_count(s), STACK_SIZE);
		goto fail_test;
	}

	ret = test_stack_push_pop(s, obj_table, 1);
	if (ret) {
		printf("[%s():%u] Single object push/pop failed\n",
		       __func__, __LINE__);
		goto fail_test;
	}

	ret = test_stack_push_pop(s, obj_table, MAX_BULK);
	if (ret) {
		printf("[%s():%u] Bulk object push/pop failed\n",
		       __func__, __LINE__);
		goto fail_test;
	}

	ret = rte_stack_push(s, obj_table, 2 * STACK_SIZE);
	if (ret != 0) {
		printf("[%s():%u] Excess objects push succeeded\n",
		       __func__, __LINE__);
		ret = -1;
		goto fail_test;
	}

	ret = rte_stack_pop(s, obj_table, 1);
	if (ret != 0) {
		printf("[%s():%u] Empty stack pop succeeded\n",
		       __func__, __LINE__);
		ret = -1;
		goto fail_test;
	}

	ret = 0;

fail_test:
	rte_stack_free(s);

	rte_free(obj_table);

	return ret;
}
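
/* Creating a second stack with an already-used name must fail. */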
static int
test_stack_name_reuse(uint32_t flags)
{
	struct rte_stack *s[2];

	s[0] = rte_stack_create("test", STACK_SIZE, rte_socket_id(), flags);
	if (s[0] == NULL) {
		printf("[%s():%u] Failed to create a stack\n",
		       __func__, __LINE__);
		return -1;
	}

	s[1] = rte_stack_create("test", STACK_SIZE, rte_socket_id(), flags);
	if (s[1] != NULL) {
		printf("[%s():%u] Failed to detect re-used name\n",
		       __func__, __LINE__);
		rte_stack_free(s[1]);
		rte_stack_free(s[0]);
		return -1;
	}

	rte_stack_free(s[0]);

	return 0;
}
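
/* A name of length RTE_STACK_NAMESIZE (one past the maximum) must be
 * rejected with ENAMETOOLONG.
 */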
static int
test_stack_name_length(uint32_t flags)
{
	char name[RTE_STACK_NAMESIZE + 1];
	struct rte_stack *s;

	memset(name, 's', sizeof(name));
	name[RTE_STACK_NAMESIZE] = '\0';

	s = rte_stack_create(name, STACK_SIZE, rte_socket_id(), flags);
	if (s != NULL) {
		printf("[%s():%u] Failed to prevent long name\n",
		       __func__, __LINE__);
		rte_stack_free(s);
		return -1;
	}

	if (rte_errno != ENAMETOOLONG) {
		printf("[%s():%u] rte_stack failed to set correct errno on failed create\n",
		       __func__, __LINE__);
		return -1;
	}

	return 0;
}
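
/* Lookups of a non-existent name and of a NULL name must fail and set
 * rte_errno to ENOENT and EINVAL, respectively.
 */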
static int
test_lookup_null(void)
{
	struct rte_stack *s = rte_stack_lookup("stack_not_found");

	if (s != NULL) {
		printf("[%s():%u] rte_stack found a non-existent stack\n",
		       __func__, __LINE__);
		return -1;
	}

	if (rte_errno != ENOENT) {
		printf("[%s():%u] rte_stack failed to set correct errno on failed lookup\n",
		       __func__, __LINE__);
		return -1;
	}

	s = rte_stack_lookup(NULL);

	if (s != NULL) {
		printf("[%s():%u] rte_stack found a non-existent stack\n",
		       __func__, __LINE__);
		return -1;
	}

	if (rte_errno != EINVAL) {
		printf("[%s():%u] rte_stack failed to set correct errno on failed lookup\n",
		       __func__, __LINE__);
		return -1;
	}

	return 0;
}
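
/* rte_stack_free(NULL) must be a harmless no-op. */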
static int
test_free_null(void)
{
	/* Check whether the library properly handles a NULL pointer */
	rte_stack_free(NULL);

	return 0;
}

#define NUM_ITERS_PER_THREAD 100000

struct test_args {
	struct rte_stack *s;
	rte_atomic64_t *sz;
};
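
/* Worker loop: atomically reserve up to MAX_BULK slots in the shared size
 * counter, push that many pointers, pop them back, then release the slots.
 * The reservation keeps the combined pushes of all lcores within STACK_SIZE.
 */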
static int
stack_thread_push_pop(void *args)
{
	struct test_args *t = args;
	void **obj_table;
	int i;

	obj_table = rte_calloc(NULL, STACK_SIZE, sizeof(void *), 0);
	if (obj_table == NULL) {
		printf("[%s():%u] failed to calloc %zu bytes\n",
		       __func__, __LINE__, STACK_SIZE * sizeof(void *));
		return -1;
	}

	for (i = 0; i < NUM_ITERS_PER_THREAD; i++) {
		unsigned int success, num;

		/* Reserve up to min(MAX_BULK, available slots) stack entries,
		 * then push and pop those stack entries.
		 */
		do {
			uint64_t sz = rte_atomic64_read(t->sz);
			volatile uint64_t *sz_addr;

			sz_addr = (volatile uint64_t *)t->sz;

			num = RTE_MIN(rte_rand() % MAX_BULK, STACK_SIZE - sz);

			success = rte_atomic64_cmpset(sz_addr, sz, sz + num);
		} while (success == 0);

		if (rte_stack_push(t->s, obj_table, num) != num) {
			printf("[%s():%u] Failed to push %u pointers\n",
			       __func__, __LINE__, num);
			rte_free(obj_table);
			return -1;
		}

		if (rte_stack_pop(t->s, obj_table, num) != num) {
			printf("[%s():%u] Failed to pop %u pointers\n",
			       __func__, __LINE__, num);
			rte_free(obj_table);
			return -1;
		}

		rte_atomic64_sub(t->sz, num);
	}

	rte_free(obj_table);
	return 0;
}
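
/* Launch stack_thread_push_pop() on every slave lcore and on the master,
 * all hammering a single shared stack; skipped with fewer than two lcores.
 */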
static int
test_stack_multithreaded(uint32_t flags)
{
	struct test_args *args;
	unsigned int lcore_id;
	struct rte_stack *s;
	rte_atomic64_t size;

	printf("[%s():%u] Running with %u lcores\n",
	       __func__, __LINE__, rte_lcore_count());

	if (rte_lcore_count() < 2)
		return 0;

	args = rte_malloc(NULL, sizeof(struct test_args) * RTE_MAX_LCORE, 0);
	if (args == NULL) {
		printf("[%s():%u] failed to malloc %zu bytes\n",
		       __func__, __LINE__,
		       sizeof(struct test_args) * RTE_MAX_LCORE);
		return -1;
	}

	s = rte_stack_create("test", STACK_SIZE, rte_socket_id(), flags);
	if (s == NULL) {
		printf("[%s():%u] Failed to create a stack\n",
		       __func__, __LINE__);
		rte_free(args);
		return -1;
	}

	rte_atomic64_init(&size);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		args[lcore_id].s = s;
		args[lcore_id].sz = &size;

		if (rte_eal_remote_launch(stack_thread_push_pop,
					  &args[lcore_id], lcore_id))
			rte_panic("Failed to launch lcore %d\n", lcore_id);
	}

	lcore_id = rte_lcore_id();

	args[lcore_id].s = s;
	args[lcore_id].sz = &size;

	stack_thread_push_pop(&args[lcore_id]);

	rte_eal_mp_wait_lcore();

	rte_stack_free(s);
	rte_free(args);

	return 0;
}
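
/* Run the full test suite for the given stack flags. */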
static int
__test_stack(uint32_t flags)
{
	if (test_stack_basic(flags) < 0)
		return -1;

	if (test_lookup_null() < 0)
		return -1;

	if (test_free_null() < 0)
		return -1;

	if (test_stack_name_reuse(flags) < 0)
		return -1;

	if (test_stack_name_length(flags) < 0)
		return -1;

	if (test_stack_multithreaded(flags) < 0)
		return -1;

	return 0;
}
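
/* Entry points: exercise the default (lock-based) stack and the lock-free
 * variant selected by RTE_STACK_F_LF.
 */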
static int
test_stack(void)
{
	return __test_stack(0);
}

static int
test_lf_stack(void)
{
	return __test_stack(RTE_STACK_F_LF);
}

REGISTER_TEST_COMMAND(stack_autotest, test_stack);
REGISTER_TEST_COMMAND(stack_lf_autotest, test_lf_stack);