/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

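/*
 * Per-NUMA-socket pair of mempools, shared by all crypto devices on that
 * socket: sess_mp holds the session headers and priv_mp the driver-private
 * session data.
 */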
static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead",
	[CPERF_PDCP] = "pdcp"
};

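/*
 * Each test type supplies a constructor/runner/destructor triple: main()
 * builds one context per queue pair with the constructor, launches the
 * runner on a slave lcore for each context, and tears the contexts down
 * with the destructor.
 */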
const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
		cperf_throughput_test_constructor,
		cperf_throughput_test_runner,
		cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_LATENCY] = {
		cperf_latency_test_constructor,
		cperf_latency_test_runner,
		cperf_latency_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
		cperf_verify_test_constructor,
		cperf_verify_test_runner,
		cperf_verify_test_destructor
	},
	[CPERF_TEST_TYPE_PMDCC] = {
		cperf_pmd_cyclecount_test_constructor,
		cperf_pmd_cyclecount_test_runner,
		cperf_pmd_cyclecount_test_destructor
	}
};

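/*
 * Create the per-socket pools on first use: a plain mempool for the
 * driver-private session data (sized by the caller for the largest
 * private session in the system) and a session mempool for the headers.
 */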
static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"priv_sess_mp_%d", socket_id);

		sess_mp = rte_mempool_create(mp_name,
					nb_sessions,
					session_priv_size,
					0, 0, NULL, NULL, NULL,
					NULL, socket_id,
					0);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {

		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%d", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
				nb_sessions, 0, 0, 0, socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}

static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices of type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices if there are more of them
	 * available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate the number of queue pairs needed, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * if there are 4 cores and 2 crypto devices, 2 queue pairs will be
	 * set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;
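	/*
	 * The expression above is a ceiling division; e.g. with 5 slave
	 * lcores and 2 devices it yields 3 queue pairs per device, so no
	 * lcore is left without a queue pair.
	 */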

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/* range check the socket_id - negative values become big
		 * positive ones due to use of unsigned value
		 */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
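		/*
		 * Security and asymmetric crypto are not exercised by this
		 * tool, so disable those feature flags and let the PMD
		 * optimize for symmetric-only operation.
		 */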
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id,
			.ff_disable = RTE_CRYPTODEV_FF_SECURITY |
				      RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO,
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the minimum headroom and tailroom
		 * required by the crypto PMD. The application needs to
		 * honour this when creating the mbufs.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count *
				opts->nb_qps;

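		/*
		 * The scheduler PMD needs a session per slave device it
		 * drives, hence the extra nb_slaves factor in the pool
		 * sizing above.
		 */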
		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		ret = fill_session_pool_socket(socket_id, max_sess_size,
				sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u\n", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u\n", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
				cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}

static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}

int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

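	/* The EAL has consumed its own arguments above; what remains are
	 * the application's options.
	 */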
	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
			"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"the requested capabilities\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Test vector file is missing "
					"required entries\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find the function ops set for "
				"the specified algorithm combination\n");
		goto err;
	}

	if (!opts.silent && opts.test != CPERF_TEST_TYPE_THROUGHPUT &&
			opts.test != CPERF_TEST_TYPE_LATENCY)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

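	/*
	 * Hand one (device, queue pair) tuple to each slave lcore:
	 * queue pair ids advance round-robin within a device before
	 * moving on to the next enabled device.
	 */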
	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}
		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list = opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		if (opts.imix_buffer_sizes == NULL)
			goto err;
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];
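		/*
		 * For example, an IMIX list {3, 1} over buffer sizes
		 * {64, 1024} accumulates to {3, 4}: a draw in [0, 4)
		 * then selects 64 bytes three times as often as 1024.
		 */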

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];
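		/*
		 * With the same example, the reported test buffer size is
		 * the weighted average (64 * 3 + 1024 * 1) / 4 = 304 bytes.
		 */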

		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			ret |= rte_eal_wait_lcore(lcore_id);
			i++;
		}

		if (ret != EXIT_SUCCESS)
			goto err;
	} else {

		/* Get first size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				ret |= rte_eal_wait_lcore(lcore_id);
				i++;
			}

			if (ret != EXIT_SUCCESS)
				goto err;

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}