/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Arm Limited
 */

#include <stdio.h>
#include <stdbool.h>
#include <inttypes.h>
#include <rte_pause.h>
#include <rte_rcu_qsbr.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <unistd.h>

#include "test.h"

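/*
 * Performance tests for the RCU QSBR (quiescent-state-based reclamation)
 * library. Using the TSC, the tests below measure:
 *   - the reader-side cost of registering on a QS variable, going online,
 *     reporting quiescent states and going offline,
 *   - the writer-side cost of starting a grace period (rte_rcu_qsbr_start)
 *     and checking for its completion (rte_rcu_qsbr_check), in blocking and
 *     non-blocking mode,
 *   - the end-to-end cost of deleting entries from an rte_hash table that
 *     is shared with concurrent readers and freeing them after a grace
 *     period.
 * The test is registered as 'rcu_qsbr_perf_autotest' at the end of the file.
 */
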
/* Maximum number of lcore slots supported by these tests. */
#define TEST_RCU_MAX_LCORE 128
static uint16_t enabled_core_ids[TEST_RCU_MAX_LCORE];
static uint8_t num_cores;

static uint32_t *keys;
#define TOTAL_ENTRY (1024 * 8)
#define COUNTER_VALUE 4096
static uint32_t *hash_data[TEST_RCU_MAX_LCORE][TOTAL_ENTRY];
static volatile uint8_t writer_done;
static volatile uint8_t all_registered;
static volatile uint32_t thr_id;

static struct rte_rcu_qsbr *t[TEST_RCU_MAX_LCORE];
static struct rte_hash *h[TEST_RCU_MAX_LCORE];
static char hash_name[TEST_RCU_MAX_LCORE][8];
static rte_atomic64_t updates, checks;
static rte_atomic64_t update_cycles, check_cycles;

/* Scale down results to 1000 operations to support lower
 * granularity clocks.
 */
#define RCU_SCALE_DOWN 1000

/* Simple way to allocate thread ids in 0 to TEST_RCU_MAX_LCORE space */
static inline uint32_t
alloc_thread_id(void)
{
	uint32_t tmp_thr_id;

	tmp_thr_id = __atomic_fetch_add(&thr_id, 1, __ATOMIC_RELAXED);
	if (tmp_thr_id >= TEST_RCU_MAX_LCORE)
		printf("Invalid thread id %u\n", tmp_thr_id);

	return tmp_thr_id;
}

static inline int
get_enabled_cores_mask(void)
{
	uint16_t core_id;
	uint32_t max_cores = rte_lcore_count();

	if (max_cores > TEST_RCU_MAX_LCORE) {
		printf("Number of cores exceeds %d\n", TEST_RCU_MAX_LCORE);
		return -1;
	}

	core_id = 0;
	num_cores = 0;
	RTE_LCORE_FOREACH_SLAVE(core_id) {
		enabled_core_ids[num_cores] = core_id;
		num_cores++;
	}

	return 0;
}

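/*
 * Reader worker: measures the per-iteration cost of the QSBR reader-side
 * reporting path. The calls below follow the usual reader life cycle:
 *
 *   rte_rcu_qsbr_thread_register(v, id);   - claim a slot on the QS variable
 *   rte_rcu_qsbr_thread_online(v, id);     - start being tracked by writers
 *   rte_rcu_qsbr_quiescent(v, id);         - report a quiescent state (looped)
 *   rte_rcu_qsbr_thread_offline(v, id);    - stop being tracked
 *   rte_rcu_qsbr_thread_unregister(v, id); - release the slot
 *
 * With a writer present the loop runs until the writer sets writer_done;
 * otherwise it runs a fixed 100M iterations.
 */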
static int
test_rcu_qsbr_reader_perf(void *arg)
{
	bool writer_present = (bool)arg;
	uint32_t thread_id = alloc_thread_id();
	uint64_t loop_cnt = 0;
	uint64_t begin, cycles;

	/* Register to report quiescent state */
	rte_rcu_qsbr_thread_register(t[0], thread_id);
	/* Make the thread online */
	rte_rcu_qsbr_thread_online(t[0], thread_id);

	begin = rte_rdtsc_precise();

	if (writer_present) {
		while (!writer_done) {
			/* Update quiescent state counter */
			rte_rcu_qsbr_quiescent(t[0], thread_id);
			loop_cnt++;
		}
	} else {
		while (loop_cnt < 100000000) {
			/* Update quiescent state counter */
			rte_rcu_qsbr_quiescent(t[0], thread_id);
			loop_cnt++;
		}
	}

	cycles = rte_rdtsc_precise() - begin;
	rte_atomic64_add(&update_cycles, cycles);
	rte_atomic64_add(&updates, loop_cnt);

	/* Make the thread offline */
	rte_rcu_qsbr_thread_offline(t[0], thread_id);
	/* Unregister before exiting so the writer does not wait on this thread */
	rte_rcu_qsbr_thread_unregister(t[0], thread_id);

	return 0;
}

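/*
 * Writer worker: measures the cost of one grace-period query.
 * rte_rcu_qsbr_start() returns a token identifying the grace period;
 * rte_rcu_qsbr_check(v, token, wait) reports whether every registered
 * reader has passed through a quiescent state since that token was taken
 * (offline readers do not hold up the check). With wait == true the call
 * blocks until the grace period completes. With wait == false (the
 * multi-writer test below) the worker skips rte_rcu_qsbr_start() and only
 * measures the non-blocking check with token == 0.
 */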
static int
test_rcu_qsbr_writer_perf(void *arg)
{
	bool wait = (bool)arg;
	uint64_t token = 0;
	uint64_t loop_cnt = 0;
	uint64_t begin, cycles;

	begin = rte_rdtsc_precise();

	do {
		/* Start the quiescent state query process */
		if (wait)
			token = rte_rcu_qsbr_start(t[0]);

		/* Check quiescent state status */
		rte_rcu_qsbr_check(t[0], token, wait);
		loop_cnt++;
	} while (loop_cnt < 20000000);

	cycles = rte_rdtsc_precise() - begin;
	rte_atomic64_add(&check_cycles, cycles);
	rte_atomic64_add(&checks, loop_cnt);
	return 0;
}

/*
 * Perf test: Reader/writer
 * Single writer, Multiple Readers, Single QS var, Blocking rcu_qsbr_check
 */
static int
test_rcu_qsbr_perf(void)
{
	int i, sz;
	int tmp_num_cores;

	writer_done = 0;

	rte_atomic64_clear(&updates);
	rte_atomic64_clear(&update_cycles);
	rte_atomic64_clear(&checks);
	rte_atomic64_clear(&check_cycles);

	printf("\nPerf Test: %d Readers/1 Writer('wait' in qsbr_check == true)\n",
		num_cores - 1);

	__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);

	if (all_registered == 1)
		tmp_num_cores = num_cores - 1;
	else
		tmp_num_cores = TEST_RCU_MAX_LCORE;

	sz = rte_rcu_qsbr_get_memsize(tmp_num_cores);
	t[0] = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
						RTE_CACHE_LINE_SIZE);
	/* QS variable is initialized */
	rte_rcu_qsbr_init(t[0], tmp_num_cores);

	/* Reader threads are launched */
	for (i = 0; i < num_cores - 1; i++)
		rte_eal_remote_launch(test_rcu_qsbr_reader_perf, (void *)1,
					enabled_core_ids[i]);

	/* Writer thread is launched */
	rte_eal_remote_launch(test_rcu_qsbr_writer_perf,
			(void *)1, enabled_core_ids[i]);

	/* Wait for the writer thread */
	rte_eal_wait_lcore(enabled_core_ids[i]);
	writer_done = 1;

	/* Wait until all readers have exited */
	rte_eal_mp_wait_lcore();

	printf("Total RCU updates = %"PRIi64"\n", rte_atomic64_read(&updates));
	printf("Cycles per %d updates: %"PRIi64"\n", RCU_SCALE_DOWN,
		rte_atomic64_read(&update_cycles) /
		(rte_atomic64_read(&updates) / RCU_SCALE_DOWN));
	printf("Total RCU checks = %"PRIi64"\n", rte_atomic64_read(&checks));
	printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
		rte_atomic64_read(&check_cycles) /
		(rte_atomic64_read(&checks) / RCU_SCALE_DOWN));

	rte_free(t[0]);

	return 0;
}

/*
 * Perf test: Readers
 * Multiple readers, no writer, Single QS variable
 */
static int
test_rcu_qsbr_rperf(void)
{
	int i, sz;
	int tmp_num_cores;

	rte_atomic64_clear(&updates);
	rte_atomic64_clear(&update_cycles);

	__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);

	printf("\nPerf Test: %d Readers\n", num_cores);

	if (all_registered == 1)
		tmp_num_cores = num_cores;
	else
		tmp_num_cores = TEST_RCU_MAX_LCORE;

	sz = rte_rcu_qsbr_get_memsize(tmp_num_cores);
	t[0] = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
						RTE_CACHE_LINE_SIZE);
	/* QS variable is initialized */
	rte_rcu_qsbr_init(t[0], tmp_num_cores);

	/* Reader threads are launched */
	for (i = 0; i < num_cores; i++)
		rte_eal_remote_launch(test_rcu_qsbr_reader_perf, NULL,
					enabled_core_ids[i]);

	/* Wait until all readers have exited */
	rte_eal_mp_wait_lcore();

	printf("Total RCU updates = %"PRIi64"\n", rte_atomic64_read(&updates));
	printf("Cycles per %d updates: %"PRIi64"\n", RCU_SCALE_DOWN,
		rte_atomic64_read(&update_cycles) /
		(rte_atomic64_read(&updates) / RCU_SCALE_DOWN));

	rte_free(t[0]);

	return 0;
}

/*
 * Perf test:
 * Multiple writers, Single QS variable, Non-blocking rcu_qsbr_check
 */
static int
test_rcu_qsbr_wperf(void)
{
	int i, sz;

	rte_atomic64_clear(&checks);
	rte_atomic64_clear(&check_cycles);

	__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);

	printf("\nPerf test: %d Writers ('wait' in qsbr_check == false)\n",
		num_cores);

	/* The number of readers does not matter for the QS variable in this
	 * test case, as no reader will be registered.
	 */
	sz = rte_rcu_qsbr_get_memsize(TEST_RCU_MAX_LCORE);
	t[0] = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
						RTE_CACHE_LINE_SIZE);
	/* QS variable is initialized */
	rte_rcu_qsbr_init(t[0], TEST_RCU_MAX_LCORE);

	/* Writer threads are launched */
	for (i = 0; i < num_cores; i++)
		rte_eal_remote_launch(test_rcu_qsbr_writer_perf,
				(void *)0, enabled_core_ids[i]);

	/* Wait until all writer threads have exited */
	rte_eal_mp_wait_lcore();

	printf("Total RCU checks = %"PRIi64"\n", rte_atomic64_read(&checks));
	printf("Cycles per %d checks: %"PRIi64"\n", RCU_SCALE_DOWN,
		rte_atomic64_read(&check_cycles) /
		(rte_atomic64_read(&checks) / RCU_SCALE_DOWN));

	rte_free(t[0]);

	return 0;
}

/*
 * RCU test cases using rte_hash data structure.
 */
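/*
 * Reader worker for the rte_hash based tests. Each pass goes online, looks
 * up every key and spins the per-entry counter up to COUNTER_VALUE, then
 * reports a quiescent state and goes offline, until the writer is done.
 * rte_rcu_qsbr_lock()/rte_rcu_qsbr_unlock() bracket each lookup; they are
 * debug helpers (lock-nesting counters) and are expected to compile to
 * no-ops unless the RCU debug option is enabled in the DPDK build.
 */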
static int
test_rcu_qsbr_hash_reader(void *arg)
{
	struct rte_rcu_qsbr *temp;
	struct rte_hash *hash = NULL;
	int i;
	uint64_t loop_cnt = 0;
	uint64_t begin, cycles;
	uint32_t thread_id = alloc_thread_id();
	uint8_t read_type = (uint8_t)((uintptr_t)arg);
	uint32_t *pdata;

	temp = t[read_type];
	hash = h[read_type];

	rte_rcu_qsbr_thread_register(temp, thread_id);

	begin = rte_rdtsc_precise();

	do {
		rte_rcu_qsbr_thread_online(temp, thread_id);
		for (i = 0; i < TOTAL_ENTRY; i++) {
			rte_rcu_qsbr_lock(temp, thread_id);
			if (rte_hash_lookup_data(hash, keys + i,
					(void **)&pdata) != -ENOENT) {
				*pdata = 0;
				while (*pdata < COUNTER_VALUE)
					++*pdata;
			}
			rte_rcu_qsbr_unlock(temp, thread_id);
		}
		/* Update quiescent state counter */
		rte_rcu_qsbr_quiescent(temp, thread_id);
		rte_rcu_qsbr_thread_offline(temp, thread_id);
		loop_cnt++;
	} while (!writer_done);

	cycles = rte_rdtsc_precise() - begin;
	rte_atomic64_add(&update_cycles, cycles);
	rte_atomic64_add(&updates, loop_cnt);

	rte_rcu_qsbr_thread_unregister(temp, thread_id);

	return 0;
}

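/*
 * Builds the shared rte_hash table used by the hash reader/writer tests:
 * TOTAL_ENTRY uint32_t keys (0 .. TOTAL_ENTRY - 1), each mapped to a
 * separately allocated uint32_t counter, created with the lock-free
 * read/write concurrency flag so lookups can proceed while the writer
 * deletes entries.
 */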
static struct rte_hash *
init_hash(int hash_id)
{
	int i;
	struct rte_hash *h = NULL;

	sprintf(hash_name[hash_id], "hash%d", hash_id);
	struct rte_hash_parameters hash_params = {
		.entries = TOTAL_ENTRY,
		.key_len = sizeof(uint32_t),
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		.hash_func = rte_hash_crc,
		.extra_flag =
			RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF,
		.name = hash_name[hash_id],
	};

	h = rte_hash_create(&hash_params);
	if (h == NULL) {
		printf("Hash create Failed\n");
		return NULL;
	}

	for (i = 0; i < TOTAL_ENTRY; i++) {
		hash_data[hash_id][i] = rte_zmalloc(NULL, sizeof(uint32_t), 0);
		if (hash_data[hash_id][i] == NULL) {
			printf("No memory\n");
			return NULL;
		}
	}
	keys = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_ENTRY, 0);
	if (keys == NULL) {
		printf("No memory\n");
		return NULL;
	}

	for (i = 0; i < TOTAL_ENTRY; i++)
		keys[i] = i;

	for (i = 0; i < TOTAL_ENTRY; i++) {
		if (rte_hash_add_key_data(h, keys + i,
				(void *)((uintptr_t)hash_data[hash_id][i]))
				< 0) {
			printf("Hash key add Failed #%d\n", i);
			return NULL;
		}
	}
	return h;
}

/*
 * Perf test:
 * Single writer, Single QS variable, Single QSBR query,
 * Blocking rcu_qsbr_check
 */
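/*
 * Writer-side sequence exercised per entry below (the usual QSBR deferred
 * reclamation pattern; v, h, key and data stand for the variables used in
 * the loop):
 *
 *   pos = rte_hash_del_key(h, key);           - unlink: new lookups miss
 *   token = rte_rcu_qsbr_start(v);            - begin a grace period
 *   rte_rcu_qsbr_check(v, token, true);       - wait for all readers
 *   rte_hash_free_key_with_position(h, pos);  - safe to recycle the slot
 *   rte_free(data);                           - safe to free the user data
 *
 * The check on *hash_data[0][i] verifies that any reader which found the
 * entry before it was deleted had time to count up to COUNTER_VALUE.
 */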
static int
test_rcu_qsbr_sw_sv_1qs(void)
{
	uint64_t token, begin, cycles;
	int i, tmp_num_cores, sz;
	int32_t pos;

	writer_done = 0;

	rte_atomic64_clear(&updates);
	rte_atomic64_clear(&update_cycles);
	rte_atomic64_clear(&checks);
	rte_atomic64_clear(&check_cycles);

	__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);

	printf("\nPerf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Blocking QSBR Check\n", num_cores);

	if (all_registered == 1)
		tmp_num_cores = num_cores;
	else
		tmp_num_cores = TEST_RCU_MAX_LCORE;

	sz = rte_rcu_qsbr_get_memsize(tmp_num_cores);
	t[0] = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
						RTE_CACHE_LINE_SIZE);
	/* QS variable is initialized */
	rte_rcu_qsbr_init(t[0], tmp_num_cores);

	/* Shared data structure created */
	h[0] = init_hash(0);
	if (h[0] == NULL) {
		printf("Hash init failed\n");
		goto error;
	}

	/* Reader threads are launched */
	for (i = 0; i < num_cores; i++)
		rte_eal_remote_launch(test_rcu_qsbr_hash_reader, NULL,
					enabled_core_ids[i]);

	begin = rte_rdtsc_precise();

	for (i = 0; i < TOTAL_ENTRY; i++) {
		/* Delete elements from the shared data structure */
		pos = rte_hash_del_key(h[0], keys + i);
		if (pos < 0) {
			printf("Delete key failed #%d\n", keys[i]);
			goto error;
		}
		/* Start the quiescent state query process */
		token = rte_rcu_qsbr_start(t[0]);

		/* Check the quiescent state status */
		rte_rcu_qsbr_check(t[0], token, true);
		if (*hash_data[0][i] != COUNTER_VALUE &&
				*hash_data[0][i] != 0) {
			printf("Reader did not complete #%d = %d\n", i,
				*hash_data[0][i]);
			goto error;
		}

		if (rte_hash_free_key_with_position(h[0], pos) < 0) {
			printf("Failed to free the key #%d\n", keys[i]);
			goto error;
		}
		rte_free(hash_data[0][i]);
		hash_data[0][i] = NULL;
	}

	cycles = rte_rdtsc_precise() - begin;
	rte_atomic64_add(&check_cycles, cycles);
	rte_atomic64_add(&checks, i);

	writer_done = 1;

	/* Wait until all readers have exited */
	rte_eal_mp_wait_lcore();
	/* Check return value from threads */
	for (i = 0; i < num_cores; i++)
		if (lcore_config[enabled_core_ids[i]].ret < 0)
			goto error;
	rte_hash_free(h[0]);
	rte_free(keys);

	printf("Following numbers include calls to rte_hash functions\n");
	printf("Cycles per 1 update(online/update/offline): %"PRIi64"\n",
		rte_atomic64_read(&update_cycles) /
		rte_atomic64_read(&updates));

	printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
		rte_atomic64_read(&check_cycles) /
		rte_atomic64_read(&checks));

	rte_free(t[0]);

	return 0;

error:
	writer_done = 1;
	/* Wait until all readers have exited */
	rte_eal_mp_wait_lcore();

	rte_hash_free(h[0]);
	rte_free(keys);
	for (i = 0; i < TOTAL_ENTRY; i++)
		rte_free(hash_data[0][i]);

	rte_free(t[0]);

	return -1;
}

/*
 * Perf test:
 * Single writer, Single QS variable, Single QSBR query,
 * Non-blocking rcu_qsbr_check
 */
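/*
 * Same flow as the blocking variant above, but the grace period is polled
 * with rte_rcu_qsbr_check(v, token, false) in a loop. A non-blocking check
 * lets a writer interleave other work between polls; here the loop only
 * spins so that the cost of the polling path itself is measured.
 */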
static int
test_rcu_qsbr_sw_sv_1qs_non_blocking(void)
{
	uint64_t token, begin, cycles;
	int i, ret, tmp_num_cores, sz;
	int32_t pos;

	writer_done = 0;

	printf("Perf test: 1 writer, %d readers, 1 QSBR variable, 1 QSBR Query, Non-Blocking QSBR check\n", num_cores);

	__atomic_store_n(&thr_id, 0, __ATOMIC_SEQ_CST);

	if (all_registered == 1)
		tmp_num_cores = num_cores;
	else
		tmp_num_cores = TEST_RCU_MAX_LCORE;

	sz = rte_rcu_qsbr_get_memsize(tmp_num_cores);
	t[0] = (struct rte_rcu_qsbr *)rte_zmalloc("rcu0", sz,
						RTE_CACHE_LINE_SIZE);
	/* QS variable is initialized */
	rte_rcu_qsbr_init(t[0], tmp_num_cores);

	/* Shared data structure created */
	h[0] = init_hash(0);
	if (h[0] == NULL) {
		printf("Hash init failed\n");
		goto error;
	}

	/* Reader threads are launched */
	for (i = 0; i < num_cores; i++)
		rte_eal_remote_launch(test_rcu_qsbr_hash_reader, NULL,
					enabled_core_ids[i]);

	begin = rte_rdtsc_precise();

	for (i = 0; i < TOTAL_ENTRY; i++) {
		/* Delete elements from the shared data structure */
		pos = rte_hash_del_key(h[0], keys + i);
		if (pos < 0) {
			printf("Delete key failed #%d\n", keys[i]);
			goto error;
		}
		/* Start the quiescent state query process */
		token = rte_rcu_qsbr_start(t[0]);

		/* Check the quiescent state status */
		do {
			ret = rte_rcu_qsbr_check(t[0], token, false);
		} while (ret == 0);
		if (*hash_data[0][i] != COUNTER_VALUE &&
				*hash_data[0][i] != 0) {
			printf("Reader did not complete #%d = %d\n", i,
				*hash_data[0][i]);
			goto error;
		}

		if (rte_hash_free_key_with_position(h[0], pos) < 0) {
			printf("Failed to free the key #%d\n", keys[i]);
			goto error;
		}
		rte_free(hash_data[0][i]);
		hash_data[0][i] = NULL;
	}

	cycles = rte_rdtsc_precise() - begin;
	rte_atomic64_add(&check_cycles, cycles);
	rte_atomic64_add(&checks, i);

	writer_done = 1;
	/* Wait until all readers have exited */
	rte_eal_mp_wait_lcore();
	/* Check return value from threads */
	for (i = 0; i < num_cores; i++)
		if (lcore_config[enabled_core_ids[i]].ret < 0)
			goto error;
	rte_hash_free(h[0]);
	rte_free(keys);

	printf("Following numbers include calls to rte_hash functions\n");
	printf("Cycles per 1 update(online/update/offline): %"PRIi64"\n",
		rte_atomic64_read(&update_cycles) /
		rte_atomic64_read(&updates));

	printf("Cycles per 1 check(start, check): %"PRIi64"\n\n",
		rte_atomic64_read(&check_cycles) /
		rte_atomic64_read(&checks));

	rte_free(t[0]);

	return 0;

error:
	writer_done = 1;
	/* Wait until all readers have exited */
	rte_eal_mp_wait_lcore();

	rte_hash_free(h[0]);
	rte_free(keys);
	for (i = 0; i < TOTAL_ENTRY; i++)
		rte_free(hash_data[0][i]);

	rte_free(t[0]);

	return -1;
}

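/*
 * Entry point: discovers the worker lcores, then runs every perf test
 * twice: first with all_registered = 1 (the QS variable is sized for
 * exactly the reader threads in use) and then, if spare thread IDs are
 * available, with all_registered = 0 (the QS variable is sized for
 * TEST_RCU_MAX_LCORE threads, so most thread slots stay unregistered).
 */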
static int
test_rcu_qsbr_main(void)
{
	rte_atomic64_init(&updates);
	rte_atomic64_init(&update_cycles);
	rte_atomic64_init(&checks);
	rte_atomic64_init(&check_cycles);

	if (get_enabled_cores_mask() != 0)
		return -1;

	printf("Number of cores provided = %d\n", num_cores);
	if (num_cores < 2) {
		printf("Test failed! Need 2 or more cores\n");
		goto test_fail;
	}
	if (num_cores > TEST_RCU_MAX_LCORE) {
		printf("Test failed! %d cores supported\n", TEST_RCU_MAX_LCORE);
		goto test_fail;
	}

	printf("Perf test with all reader threads registered\n");
	printf("--------------------------------------------\n");
	all_registered = 1;

	if (test_rcu_qsbr_perf() < 0)
		goto test_fail;

	if (test_rcu_qsbr_rperf() < 0)
		goto test_fail;

	if (test_rcu_qsbr_wperf() < 0)
		goto test_fail;

	if (test_rcu_qsbr_sw_sv_1qs() < 0)
		goto test_fail;

	if (test_rcu_qsbr_sw_sv_1qs_non_blocking() < 0)
		goto test_fail;

	/* Make sure the actual number of cores provided is less than
	 * TEST_RCU_MAX_LCORE. This leaves some thread IDs unregistered on
	 * the QS variable for the tests below.
	 */
	if (num_cores >= TEST_RCU_MAX_LCORE) {
		printf("Test failed! number of cores provided should be less than %d\n",
			TEST_RCU_MAX_LCORE);
		goto test_fail;
	}

	printf("Perf test with some of reader threads registered\n");
	printf("------------------------------------------------\n");
	all_registered = 0;

	if (test_rcu_qsbr_perf() < 0)
		goto test_fail;

	if (test_rcu_qsbr_rperf() < 0)
		goto test_fail;

	if (test_rcu_qsbr_wperf() < 0)
		goto test_fail;

	if (test_rcu_qsbr_sw_sv_1qs() < 0)
		goto test_fail;

	if (test_rcu_qsbr_sw_sv_1qs_non_blocking() < 0)
		goto test_fail;

	printf("\n");

	return 0;

test_fail:
	return -1;
}

REGISTER_TEST_COMMAND(rcu_qsbr_perf_autotest, test_rcu_qsbr_main);
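
/*
 * Usage sketch: the command registered above is typically run from the
 * DPDK unit test application. The binary name, path and EAL options below
 * are build-dependent examples, not fixed values:
 *
 *   ./dpdk-test -l 0-3 -n 4
 *   RTE>> rcu_qsbr_perf_autotest
 */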