/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

static struct {
	struct rte_mempool *sess_mp;
	struct rte_mempool *priv_mp;
} session_pool_socket[RTE_MAX_NUMA_NODES];

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};

const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
			cperf_throughput_test_constructor,
			cperf_throughput_test_runner,
			cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_LATENCY] = {
			cperf_latency_test_constructor,
			cperf_latency_test_runner,
			cperf_latency_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
			cperf_verify_test_constructor,
			cperf_verify_test_runner,
			cperf_verify_test_destructor
	},
	[CPERF_TEST_TYPE_PMDCC] = {
			cperf_pmd_cyclecount_test_constructor,
			cperf_pmd_cyclecount_test_runner,
			cperf_pmd_cyclecount_test_destructor
	}
};
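
/*
 * Each cperf_testmap entry is a { constructor, runner, destructor }
 * triple: main() builds one test context per queue pair with the
 * constructor, launches the runner on each worker lcore through
 * rte_eal_remote_launch(), and finally tears the contexts down again
 * with the destructor.
 */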

static int
fill_session_pool_socket(int32_t socket_id, uint32_t session_priv_size,
		uint32_t nb_sessions)
{
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *sess_mp;

	if (session_pool_socket[socket_id].priv_mp == NULL) {
		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"priv_sess_mp_%u", socket_id);

		sess_mp = rte_mempool_create(mp_name,
					nb_sessions,
					session_priv_size,
					0, 0, NULL, NULL, NULL,
					NULL, socket_id,
					0);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].priv_mp = sess_mp;
	}

	if (session_pool_socket[socket_id].sess_mp == NULL) {

		snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"sess_mp_%u", socket_id);

		sess_mp = rte_cryptodev_sym_session_pool_create(mp_name,
					nb_sessions, 0, 0, 0, socket_id);

		if (sess_mp == NULL) {
			printf("Cannot create pool \"%s\" on socket %d\n",
				mp_name, socket_id);
			return -ENOMEM;
		}

		printf("Allocated pool \"%s\" on socket %d\n",
			mp_name, socket_id);
		session_pool_socket[socket_id].sess_mp = sess_mp;
	}

	return 0;
}
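
/*
 * Each NUMA socket gets one pair of pools, shared by every device on
 * that socket: sess_mp holds the generic symmetric session headers
 * (created with rte_cryptodev_sym_session_pool_create()), while
 * priv_mp holds the PMD-private session data and is sized for the
 * largest private session size any device reports, so a single pool
 * can serve them all.
 */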

static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	uint32_t sessions_needed = 0;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices,
	 * if there are more available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate the number of needed queue pairs, based on the number
	 * of available logical cores and crypto devices. For instance,
	 * if there are 4 cores and 2 crypto devices, 2 queue pairs will be
	 * set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;
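	/*
	 * e.g. with 5 worker lcores and 2 devices, 5 % 2 != 0, so
	 * nb_qps = 5 / 2 + 1 = 3 queue pairs per device (ceiling division).
	 */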

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
		/* range check the socket_id - negative values become big
		 * positive ones due to use of unsigned value
		 */
		if (socket_id >= RTE_MAX_NUMA_NODES)
			socket_id = 0;

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/*
		 * Device info specifies the min headroom and tailroom
		 * requirements for the crypto PMD. These need to be honoured
		 * by the application when creating the mbuf.
		 */
		if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
			/* Update headroom */
			opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
		}
		if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
			/* Update tailroom */
			opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
		}

		/* Update segment size to include headroom & tailroom */
		opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);
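
		/*
		 * Illustrative numbers: with a 2048-byte segment size and a
		 * PMD requiring 128 bytes of headroom and 8 bytes of
		 * tailroom, the effective segment grows to
		 * 2048 + 128 + 8 = 2184 bytes.
		 */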

		uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
		/*
		 * Two session objects are required for each session
		 * (one for the header, one for the private data)
		 */
		if (!strcmp((const char *)opts->device_type,
					"crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
			uint32_t nb_slaves =
				rte_cryptodev_scheduler_slaves_get(cdev_id,
								NULL);

			sessions_needed = enabled_cdev_count *
				opts->nb_qps * nb_slaves;
#endif
		} else
			sessions_needed = enabled_cdev_count *
				opts->nb_qps;

		/*
		 * A single session is required per queue pair
		 * in each device
		 */
		if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
			RTE_LOG(ERR, USER1,
				"Device does not support at least "
				"%u sessions\n", opts->nb_qps);
			return -ENOTSUP;
		}

		ret = fill_session_pool_socket(socket_id, max_sess_size,
				sessions_needed);
		if (ret < 0)
			return ret;

		qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
		qp_conf.mp_session_private =
				session_pool_socket[socket_id].priv_mp;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
					&qp_conf, socket_id);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}

static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}
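
/*
 * e.g. for a cipher-only run with an AES-CBC cipher, only the cipher
 * branch above executes, validating cipher_key_sz and cipher_iv_sz
 * against the ranges the PMD advertises for that algorithm.
 */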

static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			/* Cipher IV is only required for some algorithms */
			if (opts->cipher_iv_sz &&
					test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			/* Auth key is only required for some algorithms */
			if (opts->auth_key_sz &&
					test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}

	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_key.data == NULL)
			return -1;
		if (test_vec->aead_key.length != opts->aead_key_sz)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}
	return 0;
}
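
/*
 * In short: plaintext/ciphertext buffers must cover max_buffer_size,
 * keys, IVs and AAD must match the configured sizes exactly, and the
 * digest must be at least digest_sz bytes long.
 */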

int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;
	void *ctx[RTE_MAX_LCORE] = { };
	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
					"\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;

	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id].sess_mp,
				session_pool_socket[socket_id].priv_mp,
				cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}

		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}
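	/*
	 * The mapping is round-robin over queue pairs first, then devices:
	 * with 2 devices x 2 queue pairs, the first four worker lcores get
	 * (cdev 0, qp 0), (cdev 0, qp 1), (cdev 1, qp 0), (cdev 1, qp 1).
	 */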

	if (opts.imix_distribution_count != 0) {
		uint8_t buffer_size_count = opts.buffer_size_count;
		uint16_t distribution_total[buffer_size_count];
		uint32_t op_idx;
		uint32_t test_average_size = 0;
		const uint32_t *buffer_size_list = opts.buffer_size_list;
		const uint32_t *imix_distribution_list =
				opts.imix_distribution_list;

		opts.imix_buffer_sizes = rte_malloc(NULL,
					sizeof(uint32_t) * opts.pool_sz,
					0);
		/*
		 * Calculate accumulated distribution of
		 * probabilities per packet size
		 */
		distribution_total[0] = imix_distribution_list[0];
		for (i = 1; i < buffer_size_count; i++)
			distribution_total[i] = imix_distribution_list[i] +
				distribution_total[i-1];
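		/*
		 * e.g. an imix distribution of 20,30,50 accumulates to
		 * distribution_total = {20, 50, 100}: a random number in
		 * [0, 100) then picks buffer size 0, 1 or 2 with 20%, 30%
		 * and 50% probability respectively.
		 */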

		/* Calculate a random sequence of packet sizes, based on distribution */
		for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
			uint16_t random_number = rte_rand() %
				distribution_total[buffer_size_count - 1];
			for (i = 0; i < buffer_size_count; i++)
				if (random_number < distribution_total[i])
					break;

			opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
		}

		/* Calculate average buffer size for the IMIX distribution */
		for (i = 0; i < buffer_size_count; i++)
			test_average_size += buffer_size_list[i] *
				imix_distribution_list[i];

		opts.test_buffer_size = test_average_size /
				distribution_total[buffer_size_count - 1];
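		/*
		 * e.g. buffer sizes {64, 1024} with weights {75, 25} report
		 * an average test_buffer_size of (64*75 + 1024*25) / 100 = 304.
		 */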

		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}
	} else {

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size = opts.min_buffer_size;
		else
			opts.test_buffer_size = opts.buffer_size_list[0];

		while (opts.test_buffer_size <= opts.max_buffer_size) {
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;

				rte_eal_remote_launch(cperf_testmap[opts.test].runner,
					ctx[i], lcore_id);
				i++;
			}
			i = 0;
			RTE_LCORE_FOREACH_SLAVE(lcore_id) {

				if (i == total_nb_qps)
					break;
				rte_eal_wait_lcore(lcore_id);
				i++;
			}

			/* Get next size from range or list */
			if (opts.inc_buffer_size != 0)
				opts.test_buffer_size += opts.inc_buffer_size;
			else {
				if (++buffer_size_idx == opts.buffer_size_count)
					break;
				opts.test_buffer_size =
					opts.buffer_size_list[buffer_size_idx];
			}
		}
	}
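
	/*
	 * Both branches above run every test once per test_buffer_size:
	 * either stepping through a min:inc:max range or walking the
	 * explicit buffer size list, stopping after its last entry.
	 */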

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);
	rte_free(opts.imix_buffer_sizes);
	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}