/*
 * Provenance: git.proxmox.com — ceph.git
 * blob 8c5e0cd926d8075206e584625ec48f0ab91d7991
 * path: ceph/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
 */
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2017 Intel Corporation
3 */
4
5 #include <string.h>
6
7 #include <rte_string_fns.h>
8 #include <rte_common.h>
9 #include <rte_malloc.h>
10 #include <rte_cryptodev_pmd.h>
11
12 #include "aesni_mb_pmd_private.h"
13
14
/**
 * Capability table advertised to the cryptodev framework via
 * aesni_mb_pmd_info_get().  Each entry declares one supported symmetric
 * transform (AUTH, CIPHER or AEAD) together with the valid key, digest,
 * AAD and IV size ranges.  Size ranges with .increment = 0 denote a
 * single fixed size; {0} IV means the transform takes no IV.
 */
static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
	{	/* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 16,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 20,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA1 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1,
				.block_size = 64,
				/* plain (non-HMAC) hash: keyless */
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 20,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA224 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 28,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA224 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA224,
				.block_size = 64,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 28,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 32,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA256 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256,
				.block_size = 64,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 32,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 48,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA384 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384,
				.block_size = 128,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 48,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 64,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA512 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512,
				.block_size = 128,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 64,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* AES XCBC HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				/* AES-128/192/256 */
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 12,
					.max = 16,
					.increment = 4
				}
			}, }
		}, }
	},
	{	/* AES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
				.block_size = 16,
				.key_size = {
					.min = 16,
/* 256-bit DOCSIS keys need intel-ipsec-mb >= 0.53.3 */
#if IMB_VERSION_NUM >= IMB_VERSION(0, 53, 3)
					.max = 32,
					.increment = 16
#else
					.max = 16,
					.increment = 0
#endif
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
				.block_size = 8,
				.key_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
				.block_size = 8,
				/* 1-, 2- or 3-key triple DES */
				.key_size = {
					.min = 8,
					.max = 24,
					.increment = 8
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* DES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
				.block_size = 8,
				.key_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_CCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 4,
					.max = 16,
					.increment = 2
				},
				.aad_size = {
					.min = 0,
					.max = 46,
					.increment = 1
				},
				.iv_size = {
					.min = 7,
					.max = 13,
					.increment = 1
				},
			}, }
		}, }
	},
	{	/* AES CMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 16,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.aad_size = {
					.min = 0,
					.max = 65535,
					.increment = 1
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
501
502
503 /** Configure device */
504 static int
505 aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
506 __rte_unused struct rte_cryptodev_config *config)
507 {
508 return 0;
509 }
510
511 /** Start device */
512 static int
513 aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
514 {
515 return 0;
516 }
517
518 /** Stop device */
519 static void
520 aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
521 {
522 }
523
524 /** Close device */
525 static int
526 aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
527 {
528 return 0;
529 }
530
531
532 /** Get device statistics */
533 static void
534 aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
535 struct rte_cryptodev_stats *stats)
536 {
537 int qp_id;
538
539 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
540 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
541
542 stats->enqueued_count += qp->stats.enqueued_count;
543 stats->dequeued_count += qp->stats.dequeued_count;
544
545 stats->enqueue_err_count += qp->stats.enqueue_err_count;
546 stats->dequeue_err_count += qp->stats.dequeue_err_count;
547 }
548 }
549
550 /** Reset device statistics */
551 static void
552 aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
553 {
554 int qp_id;
555
556 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
557 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
558
559 memset(&qp->stats, 0, sizeof(qp->stats));
560 }
561 }
562
563
564 /** Get device info */
565 static void
566 aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
567 struct rte_cryptodev_info *dev_info)
568 {
569 struct aesni_mb_private *internals = dev->data->dev_private;
570
571 if (dev_info != NULL) {
572 dev_info->driver_id = dev->driver_id;
573 dev_info->feature_flags = dev->feature_flags;
574 dev_info->capabilities = aesni_mb_pmd_capabilities;
575 dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
576 /* No limit of number of sessions */
577 dev_info->sym.max_nb_sessions = 0;
578 }
579 }
580
581 /** Release queue pair */
582 static int
583 aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
584 {
585 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
586 struct rte_ring *r = NULL;
587
588 if (qp != NULL) {
589 r = rte_ring_lookup(qp->name);
590 if (r)
591 rte_ring_free(r);
592 if (qp->mb_mgr)
593 free_mb_mgr(qp->mb_mgr);
594 rte_free(qp);
595 dev->data->queue_pairs[qp_id] = NULL;
596 }
597 return 0;
598 }
599
600 /** set a unique name for the queue pair based on it's name, dev_id and qp_id */
601 static int
602 aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
603 struct aesni_mb_qp *qp)
604 {
605 unsigned n = snprintf(qp->name, sizeof(qp->name),
606 "aesni_mb_pmd_%u_qp_%u",
607 dev->data->dev_id, qp->id);
608
609 if (n >= sizeof(qp->name))
610 return -1;
611
612 return 0;
613 }
614
615 /** Create a ring to place processed operations on */
616 static struct rte_ring *
617 aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
618 unsigned int ring_size, int socket_id)
619 {
620 struct rte_ring *r;
621 char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
622
623 unsigned int n = strlcpy(ring_name, qp->name, sizeof(ring_name));
624
625 if (n >= sizeof(ring_name))
626 return NULL;
627
628 r = rte_ring_lookup(ring_name);
629 if (r) {
630 if (rte_ring_get_size(r) >= ring_size) {
631 AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
632 ring_name);
633 return r;
634 }
635
636 AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
637 ring_name);
638 return NULL;
639 }
640
641 return rte_ring_create(ring_name, ring_size, socket_id,
642 RING_F_SP_ENQ | RING_F_SC_DEQ);
643 }
644
645 /** Setup a queue pair */
646 static int
647 aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
648 const struct rte_cryptodev_qp_conf *qp_conf,
649 int socket_id)
650 {
651 struct aesni_mb_qp *qp = NULL;
652 struct aesni_mb_private *internals = dev->data->dev_private;
653 int ret = -1;
654
655 /* Free memory prior to re-allocation if needed. */
656 if (dev->data->queue_pairs[qp_id] != NULL)
657 aesni_mb_pmd_qp_release(dev, qp_id);
658
659 /* Allocate the queue pair data structure. */
660 qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
661 RTE_CACHE_LINE_SIZE, socket_id);
662 if (qp == NULL)
663 return -ENOMEM;
664
665 qp->id = qp_id;
666 dev->data->queue_pairs[qp_id] = qp;
667
668 if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
669 goto qp_setup_cleanup;
670
671
672 qp->mb_mgr = alloc_mb_mgr(0);
673 if (qp->mb_mgr == NULL) {
674 ret = -ENOMEM;
675 goto qp_setup_cleanup;
676 }
677
678 switch (internals->vector_mode) {
679 case RTE_AESNI_MB_SSE:
680 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
681 init_mb_mgr_sse(qp->mb_mgr);
682 break;
683 case RTE_AESNI_MB_AVX:
684 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
685 init_mb_mgr_avx(qp->mb_mgr);
686 break;
687 case RTE_AESNI_MB_AVX2:
688 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
689 init_mb_mgr_avx2(qp->mb_mgr);
690 break;
691 case RTE_AESNI_MB_AVX512:
692 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
693 init_mb_mgr_avx512(qp->mb_mgr);
694 break;
695 default:
696 AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n",
697 internals->vector_mode);
698 goto qp_setup_cleanup;
699 }
700
701 qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
702 qp_conf->nb_descriptors, socket_id);
703 if (qp->ingress_queue == NULL) {
704 ret = -1;
705 goto qp_setup_cleanup;
706 }
707
708 qp->sess_mp = qp_conf->mp_session;
709 qp->sess_mp_priv = qp_conf->mp_session_private;
710
711 memset(&qp->stats, 0, sizeof(qp->stats));
712
713 char mp_name[RTE_MEMPOOL_NAMESIZE];
714
715 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
716 "digest_mp_%u_%u", dev->data->dev_id, qp_id);
717 return 0;
718
719 qp_setup_cleanup:
720 if (qp) {
721 if (qp->mb_mgr)
722 free_mb_mgr(qp->mb_mgr);
723 rte_free(qp);
724 }
725
726 return ret;
727 }
728
729 /** Returns the size of the aesni multi-buffer session structure */
730 static unsigned
731 aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
732 {
733 return sizeof(struct aesni_mb_session);
734 }
735
736 /** Configure a aesni multi-buffer session from a crypto xform chain */
737 static int
738 aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
739 struct rte_crypto_sym_xform *xform,
740 struct rte_cryptodev_sym_session *sess,
741 struct rte_mempool *mempool)
742 {
743 void *sess_private_data;
744 struct aesni_mb_private *internals = dev->data->dev_private;
745 int ret;
746
747 if (unlikely(sess == NULL)) {
748 AESNI_MB_LOG(ERR, "invalid session struct");
749 return -EINVAL;
750 }
751
752 if (rte_mempool_get(mempool, &sess_private_data)) {
753 AESNI_MB_LOG(ERR,
754 "Couldn't get object from session mempool");
755 return -ENOMEM;
756 }
757
758 ret = aesni_mb_set_session_parameters(internals->mb_mgr,
759 sess_private_data, xform);
760 if (ret != 0) {
761 AESNI_MB_LOG(ERR, "failed configure session parameters");
762
763 /* Return session to mempool */
764 rte_mempool_put(mempool, sess_private_data);
765 return ret;
766 }
767
768 set_sym_session_private_data(sess, dev->driver_id,
769 sess_private_data);
770
771 return 0;
772 }
773
774 /** Clear the memory of session so it doesn't leave key material behind */
775 static void
776 aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
777 struct rte_cryptodev_sym_session *sess)
778 {
779 uint8_t index = dev->driver_id;
780 void *sess_priv = get_sym_session_private_data(sess, index);
781
782 /* Zero out the whole structure */
783 if (sess_priv) {
784 memset(sess_priv, 0, sizeof(struct aesni_mb_session));
785 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
786 set_sym_session_private_data(sess, index, NULL);
787 rte_mempool_put(sess_mp, sess_priv);
788 }
789 }
790
/**
 * Operations table registered with the cryptodev framework.
 * Maps each framework hook to the static implementations above.
 */
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
		.dev_configure		= aesni_mb_pmd_config,
		.dev_start		= aesni_mb_pmd_start,
		.dev_stop		= aesni_mb_pmd_stop,
		.dev_close		= aesni_mb_pmd_close,

		.stats_get		= aesni_mb_pmd_stats_get,
		.stats_reset		= aesni_mb_pmd_stats_reset,

		.dev_infos_get		= aesni_mb_pmd_info_get,

		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
		.queue_pair_release	= aesni_mb_pmd_qp_release,

		/* synchronous (CPU) crypto path — defined elsewhere in this PMD */
		.sym_cpu_process	= aesni_mb_cpu_crypto_process_bulk,

		.sym_session_get_size	= aesni_mb_pmd_sym_session_get_size,
		.sym_session_configure	= aesni_mb_pmd_sym_session_configure,
		.sym_session_clear	= aesni_mb_pmd_sym_session_clear
};

/* Exported handle used by the PMD registration code. */
struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;