1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
5 #include <rte_malloc.h>
6 #include <rte_cycles.h>
7 #include <rte_crypto.h>
8 #include <rte_cryptodev.h>
10 #include "cperf_test_verify.h"
11 #include "cperf_ops.h"
12 #include "cperf_test_common.h"
/*
 * Per-worker context for the crypto-perf "verify" test.
 * NOTE(review): fragmented extract -- original lines 15-18 are missing here;
 * the runner below references ctx->dev_id, ctx->qp_id and ctx->lcore_id,
 * so those fields presumably live in the elided span. Confirm against the
 * full file.
 */
14 struct cperf_verify_ctx
{
/* Mempool supplying the crypto-op objects drawn in the runner loop. */
19 struct rte_mempool
*pool
;
/* Symmetric session created by op_fns->sess_create() in the constructor. */
21 struct rte_cryptodev_sym_session
*sess
;
/* Callback that fills a burst of ops (taken from op_fns->populate_ops). */
23 cperf_populate_ops_t populate_ops
;
/* Byte offsets of the source/destination buffers inside each pool object,
 * as reported by cperf_alloc_common_memory(). */
25 uint32_t src_buf_offset
;
26 uint32_t dst_buf_offset
;
/* Test configuration and reference vector; owned by the caller. */
28 const struct cperf_options
*options
;
29 const struct cperf_test_vector
*test_vector
;
/*
 * Per-operation result record.
 * NOTE(review): only the status field is visible in this extract; the
 * struct's closing brace and any further members are elided.
 */
32 struct cperf_op_result
{
/* Completion status copied from the dequeued rte_crypto_op. */
33 enum rte_crypto_op_status status
;
/*
 * Release everything owned by a verify-test context: clear and free the
 * cryptodev session, then free the op mempool.
 * NOTE(review): original lines 38-40 and 43-45 are elided -- they presumably
 * hold the NULL guards (if (ctx) / if (ctx->sess)) and the trailing
 * rte_free(ctx); confirm against the full file before assuming ctx may be
 * dereferenced unconditionally here.
 */
37 cperf_verify_test_free(struct cperf_verify_ctx
*ctx
)
/* Detach per-device private session data, then release the session. */
41 rte_cryptodev_sym_session_clear(ctx
->dev_id
, ctx
->sess
);
42 rte_cryptodev_sym_session_free(ctx
->sess
);
/* rte_mempool_free() accepts NULL, so no guard is needed for the pool. */
46 rte_mempool_free(ctx
->pool
);
/*
 * Build a verify-test context: allocate it, record the test configuration,
 * create the crypto session, and allocate the common op/mbuf memory.
 * NOTE(review): fragmented extract -- the return type, dev_id/qp_id field
 * assignments, allocation NULL check (orig lines 63-68), error labels and
 * return statements (orig lines 80-81, 84-88, 90-92) are elided. The visible
 * tail calls cperf_verify_test_free(ctx), which is presumably the error
 * cleanup path; confirm against the full file.
 */
53 cperf_verify_test_constructor(struct rte_mempool
*sess_mp
,
54 struct rte_mempool
*sess_priv_mp
,
55 uint8_t dev_id
, uint16_t qp_id
,
56 const struct cperf_options
*options
,
57 const struct cperf_test_vector
*test_vector
,
58 const struct cperf_op_fns
*op_fns
)
60 struct cperf_verify_ctx
*ctx
= NULL
;
62 ctx
= rte_malloc(NULL
, sizeof(struct cperf_verify_ctx
), 0);
/* Cache the op-population callback and test parameters in the context. */
69 ctx
->populate_ops
= op_fns
->populate_ops
;
70 ctx
->options
= options
;
71 ctx
->test_vector
= test_vector
;
73 /* IV goes at the end of the crypto operation */
74 uint16_t iv_offset
= sizeof(struct rte_crypto_op
) +
75 sizeof(struct rte_crypto_sym_op
);
/* Create the symmetric session via the algorithm-specific helper. */
77 ctx
->sess
= op_fns
->sess_create(sess_mp
, sess_priv_mp
, dev_id
, options
,
78 test_vector
, iv_offset
);
79 if (ctx
->sess
== NULL
)
/* Allocate the shared op/mbuf pool and learn the src/dst buffer offsets. */
82 if (cperf_alloc_common_memory(options
, test_vector
, dev_id
, qp_id
, 0,
83 &ctx
->src_buf_offset
, &ctx
->dst_buf_offset
,
/* Error path: tear down whatever was set up so far. */
89 cperf_verify_test_free(ctx
);
/*
 * Verify one completed crypto operation against the reference test vector.
 * Coalesces the (possibly segmented) output mbuf into a linear buffer, then
 * memcmp()s the cipher region against the expected ciphertext (encrypt) or
 * plaintext (decrypt), and the auth region against the expected digest.
 * NOTE(review): fragmented extract -- declarations of res/len/data/nb_segs,
 * the m = op->sym->m_src/m_dst selection, the per-segment length pass, the
 * case bodies setting `cipher`, `auth` and cipher_offset, and the trailing
 * digest compare / rte_free(data) / return are all elided. Comments below
 * describe only what is visible.
 */
95 cperf_verify_op(struct rte_crypto_op
*op
,
96 const struct cperf_options
*options
,
97 const struct cperf_test_vector
*vector
)
99 const struct rte_mbuf
*m
;
103 uint32_t cipher_offset
, auth_offset
;
104 uint8_t cipher
, auth
;
/* An op that did not complete successfully fails verification outright. */
107 if (op
->status
!= RTE_CRYPTO_OP_STATUS_SUCCESS
)
/* First pass over the mbuf chain: presumably accumulates total length. */
114 nb_segs
= m
->nb_segs
;
116 while (m
&& nb_segs
!= 0) {
/* Scratch buffer to hold the linearized output data. */
122 data
= rte_malloc(NULL
, len
, 0);
/* Second pass: copy each segment's bytes into the scratch buffer. */
130 nb_segs
= m
->nb_segs
;
132 while (m
&& nb_segs
!= 0) {
133 memcpy(data
+ len
, rte_pktmbuf_mtod(m
, uint8_t *),
/* Select which regions to check based on the configured op chain.
 * NOTE(review): the `cipher`/`auth` flag assignments and cipher_offset
 * values per case are elided; only auth_offset settings are visible. */
140 switch (options
->op_type
) {
141 case CPERF_CIPHER_ONLY
:
147 case CPERF_CIPHER_THEN_AUTH
:
151 auth_offset
= options
->test_buffer_size
;
153 case CPERF_AUTH_ONLY
:
157 auth_offset
= options
->test_buffer_size
;
159 case CPERF_AUTH_THEN_CIPHER
:
163 auth_offset
= options
->test_buffer_size
;
/* Presumably the AEAD/default case -- digest follows the payload. */
169 auth_offset
= options
->test_buffer_size
;
/* Encrypt ops must produce the reference ciphertext; decrypt ops the
 * reference plaintext. res accumulates non-zero on any mismatch. */
177 if (options
->cipher_op
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
)
178 res
+= memcmp(data
+ cipher_offset
,
179 vector
->ciphertext
.data
,
180 options
->test_buffer_size
);
182 res
+= memcmp(data
+ cipher_offset
,
183 vector
->plaintext
.data
,
184 options
->test_buffer_size
);
/* GENERATE auth ops are checked against the expected digest
 * (comparison operand elided in this extract). */
188 if (options
->auth_op
== RTE_CRYPTO_AUTH_OP_GENERATE
)
189 res
+= memcmp(data
+ auth_offset
,
/*
 * Fill an mbuf (chain) with the reference input data so the output can be
 * verified after the crypto op completes: plaintext when encrypting,
 * ciphertext when decrypting. Copies segment_sz bytes per segment until
 * max_buffer_size bytes are written.
 * NOTE(review): fragmented extract -- the declarations of test_data and
 * mbuf_data, the early-return on the final partial copy, and the
 * mbuf = mbuf->next advance at the loop tail are elided.
 */
200 cperf_mbuf_set(struct rte_mbuf
*mbuf
,
201 const struct cperf_options
*options
,
202 const struct cperf_test_vector
*test_vector
)
204 uint32_t segment_sz
= options
->segment_sz
;
/* Choose the reference stream matching the configured cipher direction. */
207 (options
->cipher_op
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
) ?
208 test_vector
->plaintext
.data
:
209 test_vector
->ciphertext
.data
;
210 uint32_t remaining_bytes
= options
->max_buffer_size
;
/* Walk the segments, copying one segment_sz slice per iteration. */
212 while (remaining_bytes
) {
213 mbuf_data
= rte_pktmbuf_mtod(mbuf
, uint8_t *);
/* Last (possibly partial) segment: copy the remainder and stop. */
215 if (remaining_bytes
<= segment_sz
) {
216 memcpy(mbuf_data
, test_data
, remaining_bytes
);
/* Full segment: copy and advance the source cursor. */
220 memcpy(mbuf_data
, test_data
, segment_sz
);
221 remaining_bytes
-= segment_sz
;
222 test_data
+= segment_sz
;
/*
 * Per-lcore worker for the verify test. Repeatedly draws bursts of crypto
 * ops from the pool, populates and enqueues them, dequeues completions,
 * verifies each op against the test vector, and finally prints a summary
 * (human-readable or CSV). Returns after total_ops operations have been
 * processed.
 * NOTE(review): fragmented extract -- loop-variable declarations (i),
 * several condition/statement bodies, the drain-loop sleep/poll counters,
 * and the final return are elided; comments below cover only visible code.
 */
228 cperf_verify_test_runner(void *test_ctx
)
230 struct cperf_verify_ctx
*ctx
= test_ctx
;
/* Running enqueue/dequeue/failure counters for the summary report. */
232 uint64_t ops_enqd
= 0, ops_enqd_total
= 0, ops_enqd_failed
= 0;
233 uint64_t ops_deqd
= 0, ops_deqd_total
= 0, ops_deqd_failed
= 0;
234 uint64_t ops_failed
= 0;
/* Guards one-time printing of the table header across lcores. */
236 static int only_once
;
/* Ops drawn but not enqueued last iteration; reused next burst. */
239 uint16_t ops_unused
= 0;
240 uint32_t imix_idx
= 0;
/* VLA burst arrays sized by the configured max burst. */
242 struct rte_crypto_op
*ops
[ctx
->options
->max_burst_size
];
243 struct rte_crypto_op
*ops_processed
[ctx
->options
->max_burst_size
];
245 uint32_t lcore
= rte_lcore_id();
247 #ifdef CPERF_LINEARIZATION_ENABLE
248 struct rte_cryptodev_info dev_info
;
251 /* Check if source mbufs require coalescing */
252 if (ctx
->options
->segment_sz
< ctx
->options
->max_buffer_size
) {
253 rte_cryptodev_info_get(ctx
->dev_id
, &dev_info
);
/* Linearize only when the PMD lacks scatter-gather support
 * (the flag set in the elided branch body). */
254 if ((dev_info
.feature_flags
&
255 RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER
) == 0)
258 #endif /* CPERF_LINEARIZATION_ENABLE */
260 ctx
->lcore_id
= lcore
;
262 if (!ctx
->options
->csv
)
263 printf("\n# Running verify test on device: %u, lcore: %u\n",
/* IV is placed right after the symmetric op within each crypto op. */
266 uint16_t iv_offset
= sizeof(struct rte_crypto_op
) +
267 sizeof(struct rte_crypto_sym_op
);
/* Main loop: continue until the requested number of ops is enqueued. */
269 while (ops_enqd_total
< ctx
->options
->total_ops
) {
/* Clamp the final burst so we never exceed total_ops. */
271 uint16_t burst_size
= ((ops_enqd_total
+ ctx
->options
->max_burst_size
)
272 <= ctx
->options
->total_ops
) ?
273 ctx
->options
->max_burst_size
:
274 ctx
->options
->total_ops
-
/* Only fetch ops for the slots not left over from last iteration. */
277 uint16_t ops_needed
= burst_size
- ops_unused
;
279 /* Allocate objects containing crypto operations and mbufs */
280 if (rte_mempool_get_bulk(ctx
->pool
, (void **)ops
,
283 "Failed to allocate more crypto operations "
284 "from the crypto operation pool.\n"
285 "Consider increasing the pool size "
290 /* Setup crypto op, attach mbuf etc */
291 (ctx
->populate_ops
)(ops
, ctx
->src_buf_offset
,
293 ops_needed
, ctx
->sess
, ctx
->options
,
294 ctx
->test_vector
, iv_offset
, &imix_idx
);
297 /* Populate the mbuf with the test vector, for verification */
298 for (i
= 0; i
< ops_needed
; i
++)
299 cperf_mbuf_set(ops
[i
]->sym
->m_src
,
303 #ifdef CPERF_LINEARIZATION_ENABLE
305 /* PMD doesn't support scatter-gather and source buffer
307 * We need to linearize it before enqueuing.
309 for (i
= 0; i
< burst_size
; i
++)
310 rte_pktmbuf_linearize(ops
[i
]->sym
->m_src
);
312 #endif /* CPERF_LINEARIZATION_ENABLE */
314 /* Enqueue burst of ops on crypto device */
315 ops_enqd
= rte_cryptodev_enqueue_burst(ctx
->dev_id
, ctx
->qp_id
,
317 if (ops_enqd
< burst_size
)
321 * Calculate number of ops not enqueued (mainly for hw
322 * accelerators whose ingress queue can fill up).
324 ops_unused
= burst_size
- ops_enqd
;
325 ops_enqd_total
+= ops_enqd
;
328 /* Dequeue processed burst of ops from crypto device */
329 ops_deqd
= rte_cryptodev_dequeue_burst(ctx
->dev_id
, ctx
->qp_id
,
330 ops_processed
, ctx
->options
->max_burst_size
);
334 * Count dequeue polls which didn't return any
335 * processed operations. This statistic is mainly
336 * relevant to hw accelerators.
/* Verify each completed op; failures counted in the elided body. */
342 for (i
= 0; i
< ops_deqd
; i
++) {
343 if (cperf_verify_op(ops_processed
[i
], ctx
->options
,
347 /* Free crypto ops so they can be reused. */
348 rte_mempool_put_bulk(ctx
->pool
,
349 (void **)ops_processed
, ops_deqd
);
350 ops_deqd_total
+= ops_deqd
;
353 /* Dequeue any operations still in the crypto device */
355 while (ops_deqd_total
< ctx
->options
->total_ops
) {
356 /* Sending 0 length burst to flush sw crypto device */
357 rte_cryptodev_enqueue_burst(ctx
->dev_id
, ctx
->qp_id
, NULL
, 0);
360 ops_deqd
= rte_cryptodev_dequeue_burst(ctx
->dev_id
, ctx
->qp_id
,
361 ops_processed
, ctx
->options
->max_burst_size
);
/* Drain loop repeats the verify-and-recycle step for stragglers. */
367 for (i
= 0; i
< ops_deqd
; i
++) {
368 if (cperf_verify_op(ops_processed
[i
], ctx
->options
,
372 /* Free crypto ops so they can be reused. */
373 rte_mempool_put_bulk(ctx
->pool
,
374 (void **)ops_processed
, ops_deqd
);
375 ops_deqd_total
+= ops_deqd
;
/* Report results: aligned table for humans, semicolon-separated for CSV. */
378 if (!ctx
->options
->csv
) {
380 printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
381 "lcore id", "Buf Size", "Burst size",
382 "Enqueued", "Dequeued", "Failed Enq",
383 "Failed Deq", "Failed Ops");
386 printf("%12u%12u%12u%12"PRIu64
"%12"PRIu64
"%12"PRIu64
387 "%12"PRIu64
"%12"PRIu64
"\n",
389 ctx
->options
->max_buffer_size
,
390 ctx
->options
->max_burst_size
,
398 printf("\n# lcore id, Buffer Size(B), "
399 "Burst Size,Enqueued,Dequeued,Failed Enq,"
400 "Failed Deq,Failed Ops\n");
403 printf("%10u;%10u;%u;%"PRIu64
";%"PRIu64
";%"PRIu64
";%"PRIu64
";"
406 ctx
->options
->max_buffer_size
,
407 ctx
->options
->max_burst_size
,
/*
 * Framework destructor hook: casts the opaque argument back to the verify
 * context and releases it via cperf_verify_test_free().
 * NOTE(review): original lines 424-427 are elided -- presumably the
 * `if (ctx == NULL) return;` guard; confirm against the full file.
 */
421 cperf_verify_test_destructor(void *arg
)
423 struct cperf_verify_ctx
*ctx
= arg
;
428 cperf_verify_test_free(ctx
);