ceph/src/spdk/dpdk/app/test-crypto-perf/cperf_test_verify.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#include "cperf_test_verify.h"
#include "cperf_ops.h"
#include "cperf_test_common.h"

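/*
 * Per-lcore verify test state: the device/queue pair under test, the
 * crypto session, the op/mbuf pool, and the offsets of the source and
 * destination buffers within each pool object.
 */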
struct cperf_verify_ctx {
	uint8_t dev_id;
	uint16_t qp_id;
	uint8_t lcore_id;

	struct rte_mempool *pool;

	struct rte_cryptodev_sym_session *sess;

	cperf_populate_ops_t populate_ops;

	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;

	const struct cperf_options *options;
	const struct cperf_test_vector *test_vector;
};

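/* Completion status of a single crypto operation. */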
struct cperf_op_result {
	enum rte_crypto_op_status status;
};

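/*
 * Release everything owned by a verify context: the crypto session
 * (cleared on the device first), the op/mbuf pool, and the context
 * itself. Safe to call with a partially initialised context.
 */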
static void
cperf_verify_test_free(struct cperf_verify_ctx *ctx)
{
	if (ctx) {
		if (ctx->sess) {
			rte_cryptodev_sym_session_clear(ctx->dev_id, ctx->sess);
			rte_cryptodev_sym_session_free(ctx->sess);
		}

		if (ctx->pool)
			rte_mempool_free(ctx->pool);

		rte_free(ctx);
	}
}

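/*
 * Allocate and initialise the verify test context for one device/queue
 * pair: creates the crypto session and the common pool of operations
 * and mbufs. Returns NULL on any failure.
 */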
void *
cperf_verify_test_constructor(struct rte_mempool *sess_mp,
		struct rte_mempool *sess_priv_mp,
		uint8_t dev_id, uint16_t qp_id,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector,
		const struct cperf_op_fns *op_fns)
{
	struct cperf_verify_ctx *ctx = NULL;

	ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
	if (ctx == NULL)
		goto err;

	ctx->dev_id = dev_id;
	ctx->qp_id = qp_id;

	ctx->populate_ops = op_fns->populate_ops;
	ctx->options = options;
	ctx->test_vector = test_vector;

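	/*
	 * Each pool object holds the generic op header, the symmetric op
	 * and the IV back to back, with the packet buffers following at
	 * src_buf_offset/dst_buf_offset:
	 *
	 *   [ rte_crypto_op | rte_crypto_sym_op | IV | buffers... ]
	 *                                         ^ iv_offset
	 */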
	/* IV goes at the end of the crypto operation */
	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	ctx->sess = op_fns->sess_create(sess_mp, sess_priv_mp, dev_id, options,
			test_vector, iv_offset);
	if (ctx->sess == NULL)
		goto err;

	if (cperf_alloc_common_memory(options, test_vector, dev_id, qp_id, 0,
			&ctx->src_buf_offset, &ctx->dst_buf_offset,
			&ctx->pool) < 0)
		goto err;

	return ctx;
err:
	cperf_verify_test_free(ctx);

	return NULL;
}

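/*
 * Verify a single completed operation against the test vector: flatten
 * the processed mbuf chain into a contiguous buffer, then compare the
 * cipher and/or digest regions with the expected data. Returns 0 on
 * match, 1 on mismatch (or on any error).
 */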
static int
cperf_verify_op(struct rte_crypto_op *op,
		const struct cperf_options *options,
		const struct cperf_test_vector *vector)
{
	const struct rte_mbuf *m;
	uint32_t len;
	uint16_t nb_segs;
	uint8_t *data;
	uint32_t cipher_offset, auth_offset;
	uint8_t cipher, auth;
	int res = 0;

	if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
		return 1;

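	/* Verify the destination buffer for out-of-place ops, else the source. */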
	if (op->sym->m_dst)
		m = op->sym->m_dst;
	else
		m = op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

	data = rte_malloc(NULL, len, 0);
	if (data == NULL)
		return 1;

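	/* Second pass: gather the segmented data into the contiguous buffer. */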
	if (op->sym->m_dst)
		m = op->sym->m_dst;
	else
		m = op->sym->m_src;
	nb_segs = m->nb_segs;
	len = 0;
	while (m && nb_segs != 0) {
		memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
				m->data_len);
		len += m->data_len;
		m = m->next;
		nb_segs--;
	}

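	/*
	 * Decide which regions need checking; whenever an auth stage is
	 * present the digest sits right after the payload, at offset
	 * test_buffer_size.
	 */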
	switch (options->op_type) {
	case CPERF_CIPHER_ONLY:
		cipher = 1;
		cipher_offset = 0;
		auth = 0;
		auth_offset = 0;
		break;
	case CPERF_CIPHER_THEN_AUTH:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AUTH_ONLY:
		cipher = 0;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AUTH_THEN_CIPHER:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	case CPERF_AEAD:
		cipher = 1;
		cipher_offset = 0;
		auth = 1;
		auth_offset = options->test_buffer_size;
		break;
	default:
		res = 1;
		goto out;
	}

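	/* Encrypt ops must produce the ciphertext, decrypt ops the plaintext. */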
	if (cipher == 1) {
		if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			res += !!memcmp(data + cipher_offset,
					vector->ciphertext.data,
					options->test_buffer_size);
		else
			res += !!memcmp(data + cipher_offset,
					vector->plaintext.data,
					options->test_buffer_size);
	}

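	/* For digest-generating ops, compare the appended digest too. */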
	if (auth == 1) {
		if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
			res += !!memcmp(data + auth_offset,
					vector->digest.data,
					options->digest_sz);
	}

out:
	rte_free(data);
	return !!res;
}

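/*
 * Fill the source mbuf chain, segment by segment, with the expected
 * input data (plaintext when encrypting, ciphertext when decrypting),
 * so the device output can later be compared against the test vector.
 */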
static void
cperf_mbuf_set(struct rte_mbuf *mbuf,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	uint32_t segment_sz = options->segment_sz;
	uint8_t *mbuf_data;
	uint8_t *test_data =
		(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			test_vector->plaintext.data :
			test_vector->ciphertext.data;
	uint32_t remaining_bytes = options->max_buffer_size;

	while (remaining_bytes) {
		mbuf_data = rte_pktmbuf_mtod(mbuf, uint8_t *);

		if (remaining_bytes <= segment_sz) {
			memcpy(mbuf_data, test_data, remaining_bytes);
			return;
		}

		memcpy(mbuf_data, test_data, segment_sz);
		remaining_bytes -= segment_sz;
		test_data += segment_sz;
		mbuf = mbuf->next;
	}
}

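/*
 * Test body run on each worker lcore: enqueue bursts of ops, dequeue
 * and verify completions, drain the device once all ops are enqueued,
 * then print per-lcore statistics.
 */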
int
cperf_verify_test_runner(void *test_ctx)
{
	struct cperf_verify_ctx *ctx = test_ctx;

	uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
	uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
	uint64_t ops_failed = 0;

	static int only_once;

	uint64_t i;
	uint16_t ops_unused = 0;
	uint32_t imix_idx = 0;

	struct rte_crypto_op *ops[ctx->options->max_burst_size];
	struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];

	uint32_t lcore = rte_lcore_id();

#ifdef CPERF_LINEARIZATION_ENABLE
	struct rte_cryptodev_info dev_info;
	int linearize = 0;

	/* Check if source mbufs require coalescing */
	if (ctx->options->segment_sz < ctx->options->max_buffer_size) {
		rte_cryptodev_info_get(ctx->dev_id, &dev_info);
		if ((dev_info.feature_flags &
				RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
			linearize = 1;
	}
#endif /* CPERF_LINEARIZATION_ENABLE */

	ctx->lcore_id = lcore;

	if (!ctx->options->csv)
		printf("\n# Running verify test on device: %u, lcore: %u\n",
			ctx->dev_id, lcore);

	uint16_t iv_offset = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);

	while (ops_enqd_total < ctx->options->total_ops) {

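		/* Trim the final burst so that exactly total_ops are enqueued. */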
		uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
				<= ctx->options->total_ops) ?
						ctx->options->max_burst_size :
						ctx->options->total_ops -
						ops_enqd_total;

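		/* Ops left un-enqueued last iteration still occupy part of the burst. */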
		uint16_t ops_needed = burst_size - ops_unused;

		/* Allocate objects containing crypto operations and mbufs */
		if (rte_mempool_get_bulk(ctx->pool, (void **)ops,
					ops_needed) != 0) {
			RTE_LOG(ERR, USER1,
				"Failed to allocate more crypto operations "
				"from the crypto operation pool.\n"
				"Consider increasing the pool size "
				"with --pool-sz\n");
			return -1;
		}

		/* Set up crypto ops, attach mbufs, etc. */
		(ctx->populate_ops)(ops, ctx->src_buf_offset,
				ctx->dst_buf_offset,
				ops_needed, ctx->sess, ctx->options,
				ctx->test_vector, iv_offset, &imix_idx);

		/* Populate the mbuf with the test vector, for verification */
		for (i = 0; i < ops_needed; i++)
			cperf_mbuf_set(ops[i]->sym->m_src,
					ctx->options,
					ctx->test_vector);

#ifdef CPERF_LINEARIZATION_ENABLE
		if (linearize) {
			/* PMD doesn't support scatter-gather and source buffer
			 * is segmented.
			 * We need to linearize it before enqueuing.
			 */
			for (i = 0; i < burst_size; i++)
				rte_pktmbuf_linearize(ops[i]->sym->m_src);
		}
#endif /* CPERF_LINEARIZATION_ENABLE */

		/* Enqueue burst of ops on crypto device */
		ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
				ops, burst_size);
		if (ops_enqd < burst_size)
			ops_enqd_failed++;

		/**
		 * Calculate number of ops not enqueued (mainly for hw
		 * accelerators whose ingress queue can fill up).
		 */
		ops_unused = burst_size - ops_enqd;
		ops_enqd_total += ops_enqd;

		/* Dequeue processed burst of ops from crypto device */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);

		if (ops_deqd == 0) {
			/**
			 * Count dequeue polls which didn't return any
			 * processed operations. This statistic is mainly
			 * relevant to hw accelerators.
			 */
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
				(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	/* Dequeue any operations still in the crypto device */

	while (ops_deqd_total < ctx->options->total_ops) {
		/* Send a zero-length burst to flush the sw crypto device */
		rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);

		/* dequeue burst */
		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
				ops_processed, ctx->options->max_burst_size);
		if (ops_deqd == 0) {
			ops_deqd_failed++;
			continue;
		}

		for (i = 0; i < ops_deqd; i++) {
			if (cperf_verify_op(ops_processed[i], ctx->options,
						ctx->test_vector))
				ops_failed++;
		}
		/* Free crypto ops so they can be reused. */
		rte_mempool_put_bulk(ctx->pool,
				(void **)ops_processed, ops_deqd);
		ops_deqd_total += ops_deqd;
	}

	if (!ctx->options->csv) {
		if (!only_once)
			printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
				"lcore id", "Buf Size", "Burst size",
				"Enqueued", "Dequeued", "Failed Enq",
				"Failed Deq", "Failed Ops");
		only_once = 1;

		printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
				"%12"PRIu64"%12"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	} else {
		if (!only_once)
			printf("\n# lcore id, Buffer Size(B), "
				"Burst Size,Enqueued,Dequeued,Failed Enq,"
				"Failed Deq,Failed Ops\n");
		only_once = 1;

		printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
				"%"PRIu64"\n",
				ctx->lcore_id,
				ctx->options->max_buffer_size,
				ctx->options->max_burst_size,
				ops_enqd_total,
				ops_deqd_total,
				ops_enqd_failed,
				ops_deqd_failed,
				ops_failed);
	}

	return 0;
}

void
cperf_verify_test_destructor(void *arg)
{
	struct cperf_verify_ctx *ctx = arg;

	if (ctx == NULL)
		return;

	cperf_verify_test_free(ctx);
}