]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
8d15b99d4936e8b69f4cfc1bccd26c26ffe211f7
[ceph.git] / ceph / src / spdk / dpdk / drivers / crypto / aesni_mb / rte_aesni_mb_pmd_ops.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2017 Intel Corporation
3 */
4
5 #include <string.h>
6
7 #include <rte_string_fns.h>
8 #include <rte_common.h>
9 #include <rte_malloc.h>
10 #include <rte_cryptodev_pmd.h>
11
12 #include "rte_aesni_mb_pmd_private.h"
13
14
/**
 * Symmetric crypto capabilities advertised by the AESNI-MB PMD.
 *
 * Each entry describes one supported transform (auth, cipher or AEAD)
 * with its key/digest/IV/AAD size ranges; the table is returned to
 * applications through aesni_mb_pmd_info_get() and is terminated by
 * RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST().
 */
static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
	{ /* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 64,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 16,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 20,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* SHA1 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1,
				.block_size = 64,
				/* Plain (non-HMAC) hash: no key */
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 20,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* SHA224 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 28,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* SHA224 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA224,
				.block_size = 64,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 28,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 32,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* SHA256 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256,
				.block_size = 64,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 32,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				/* SHA-384/512 use a 128-byte internal block */
				.block_size = 128,
				.key_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 48,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* SHA384 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384,
				.block_size = 128,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 48,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 1,
					.max = 65535,
					.increment = 1
				},
				.digest_size = {
					.min = 1,
					.max = 64,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* SHA512 */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512,
				.block_size = 128,
				.key_size = {
					.min = 0,
					.max = 0,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 64,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* AES XCBC HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				/* AES-128/192/256 key lengths */
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 12,
					.max = 16,
					.increment = 4
				}
			}, }
		}, }
	},
	{ /* AES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_DES_CBC,
				.block_size = 8,
				.key_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
				.block_size = 8,
				/* 1-, 2- or 3-key triple-DES */
				.key_size = {
					.min = 8,
					.max = 24,
					.increment = 8
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* DES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
				.block_size = 8,
				.key_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				},
				.iv_size = {
					.min = 8,
					.max = 8,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* AES CCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_CCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 4,
					.max = 16,
					.increment = 2
				},
				.aad_size = {
					.min = 0,
					.max = 46,
					.increment = 1
				},
				.iv_size = {
					.min = 7,
					.max = 13,
					.increment = 1
				},
			}, }
		}, }
	},
	{ /* AES CMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 1,
					.max = 16,
					.increment = 1
				},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{ /* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = {
				.algo = RTE_CRYPTO_AEAD_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.aad_size = {
					.min = 0,
					.max = 65535,
					.increment = 1
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	{ /* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.iv_size = {
					.min = 12,
					.max = 12,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
496
497
498 /** Configure device */
499 static int
500 aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
501 __rte_unused struct rte_cryptodev_config *config)
502 {
503 return 0;
504 }
505
506 /** Start device */
507 static int
508 aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
509 {
510 return 0;
511 }
512
513 /** Stop device */
514 static void
515 aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
516 {
517 }
518
519 /** Close device */
520 static int
521 aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
522 {
523 return 0;
524 }
525
526
527 /** Get device statistics */
528 static void
529 aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
530 struct rte_cryptodev_stats *stats)
531 {
532 int qp_id;
533
534 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
535 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
536
537 stats->enqueued_count += qp->stats.enqueued_count;
538 stats->dequeued_count += qp->stats.dequeued_count;
539
540 stats->enqueue_err_count += qp->stats.enqueue_err_count;
541 stats->dequeue_err_count += qp->stats.dequeue_err_count;
542 }
543 }
544
545 /** Reset device statistics */
546 static void
547 aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
548 {
549 int qp_id;
550
551 for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
552 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
553
554 memset(&qp->stats, 0, sizeof(qp->stats));
555 }
556 }
557
558
559 /** Get device info */
560 static void
561 aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
562 struct rte_cryptodev_info *dev_info)
563 {
564 struct aesni_mb_private *internals = dev->data->dev_private;
565
566 if (dev_info != NULL) {
567 dev_info->driver_id = dev->driver_id;
568 dev_info->feature_flags = dev->feature_flags;
569 dev_info->capabilities = aesni_mb_pmd_capabilities;
570 dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
571 /* No limit of number of sessions */
572 dev_info->sym.max_nb_sessions = 0;
573 }
574 }
575
576 /** Release queue pair */
577 static int
578 aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
579 {
580 struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
581 struct rte_ring *r = NULL;
582
583 if (qp != NULL) {
584 r = rte_ring_lookup(qp->name);
585 if (r)
586 rte_ring_free(r);
587 if (qp->mb_mgr)
588 free_mb_mgr(qp->mb_mgr);
589 rte_free(qp);
590 dev->data->queue_pairs[qp_id] = NULL;
591 }
592 return 0;
593 }
594
595 /** set a unique name for the queue pair based on it's name, dev_id and qp_id */
596 static int
597 aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
598 struct aesni_mb_qp *qp)
599 {
600 unsigned n = snprintf(qp->name, sizeof(qp->name),
601 "aesni_mb_pmd_%u_qp_%u",
602 dev->data->dev_id, qp->id);
603
604 if (n >= sizeof(qp->name))
605 return -1;
606
607 return 0;
608 }
609
610 /** Create a ring to place processed operations on */
611 static struct rte_ring *
612 aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
613 unsigned int ring_size, int socket_id)
614 {
615 struct rte_ring *r;
616 char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
617
618 unsigned int n = strlcpy(ring_name, qp->name, sizeof(ring_name));
619
620 if (n >= sizeof(ring_name))
621 return NULL;
622
623 r = rte_ring_lookup(ring_name);
624 if (r) {
625 if (rte_ring_get_size(r) >= ring_size) {
626 AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
627 ring_name);
628 return r;
629 }
630
631 AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
632 ring_name);
633 return NULL;
634 }
635
636 return rte_ring_create(ring_name, ring_size, socket_id,
637 RING_F_SP_ENQ | RING_F_SC_DEQ);
638 }
639
640 /** Setup a queue pair */
641 static int
642 aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
643 const struct rte_cryptodev_qp_conf *qp_conf,
644 int socket_id)
645 {
646 struct aesni_mb_qp *qp = NULL;
647 struct aesni_mb_private *internals = dev->data->dev_private;
648 int ret = -1;
649
650 /* Free memory prior to re-allocation if needed. */
651 if (dev->data->queue_pairs[qp_id] != NULL)
652 aesni_mb_pmd_qp_release(dev, qp_id);
653
654 /* Allocate the queue pair data structure. */
655 qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
656 RTE_CACHE_LINE_SIZE, socket_id);
657 if (qp == NULL)
658 return -ENOMEM;
659
660 qp->id = qp_id;
661 dev->data->queue_pairs[qp_id] = qp;
662
663 if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
664 goto qp_setup_cleanup;
665
666
667 qp->mb_mgr = alloc_mb_mgr(0);
668 if (qp->mb_mgr == NULL) {
669 ret = -ENOMEM;
670 goto qp_setup_cleanup;
671 }
672
673 switch (internals->vector_mode) {
674 case RTE_AESNI_MB_SSE:
675 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
676 init_mb_mgr_sse(qp->mb_mgr);
677 break;
678 case RTE_AESNI_MB_AVX:
679 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
680 init_mb_mgr_avx(qp->mb_mgr);
681 break;
682 case RTE_AESNI_MB_AVX2:
683 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
684 init_mb_mgr_avx2(qp->mb_mgr);
685 break;
686 case RTE_AESNI_MB_AVX512:
687 dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
688 init_mb_mgr_avx512(qp->mb_mgr);
689 break;
690 default:
691 AESNI_MB_LOG(ERR, "Unsupported vector mode %u\n",
692 internals->vector_mode);
693 goto qp_setup_cleanup;
694 }
695
696 qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
697 qp_conf->nb_descriptors, socket_id);
698 if (qp->ingress_queue == NULL) {
699 ret = -1;
700 goto qp_setup_cleanup;
701 }
702
703 qp->sess_mp = qp_conf->mp_session;
704 qp->sess_mp_priv = qp_conf->mp_session_private;
705
706 memset(&qp->stats, 0, sizeof(qp->stats));
707
708 char mp_name[RTE_MEMPOOL_NAMESIZE];
709
710 snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
711 "digest_mp_%u_%u", dev->data->dev_id, qp_id);
712 return 0;
713
714 qp_setup_cleanup:
715 if (qp) {
716 if (qp->mb_mgr)
717 free_mb_mgr(qp->mb_mgr);
718 rte_free(qp);
719 }
720
721 return ret;
722 }
723
724 /** Return the number of allocated queue pairs */
725 static uint32_t
726 aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
727 {
728 return dev->data->nb_queue_pairs;
729 }
730
731 /** Returns the size of the aesni multi-buffer session structure */
732 static unsigned
733 aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
734 {
735 return sizeof(struct aesni_mb_session);
736 }
737
738 /** Configure a aesni multi-buffer session from a crypto xform chain */
739 static int
740 aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
741 struct rte_crypto_sym_xform *xform,
742 struct rte_cryptodev_sym_session *sess,
743 struct rte_mempool *mempool)
744 {
745 void *sess_private_data;
746 struct aesni_mb_private *internals = dev->data->dev_private;
747 int ret;
748
749 if (unlikely(sess == NULL)) {
750 AESNI_MB_LOG(ERR, "invalid session struct");
751 return -EINVAL;
752 }
753
754 if (rte_mempool_get(mempool, &sess_private_data)) {
755 AESNI_MB_LOG(ERR,
756 "Couldn't get object from session mempool");
757 return -ENOMEM;
758 }
759
760 ret = aesni_mb_set_session_parameters(internals->mb_mgr,
761 sess_private_data, xform);
762 if (ret != 0) {
763 AESNI_MB_LOG(ERR, "failed configure session parameters");
764
765 /* Return session to mempool */
766 rte_mempool_put(mempool, sess_private_data);
767 return ret;
768 }
769
770 set_sym_session_private_data(sess, dev->driver_id,
771 sess_private_data);
772
773 return 0;
774 }
775
776 /** Clear the memory of session so it doesn't leave key material behind */
777 static void
778 aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
779 struct rte_cryptodev_sym_session *sess)
780 {
781 uint8_t index = dev->driver_id;
782 void *sess_priv = get_sym_session_private_data(sess, index);
783
784 /* Zero out the whole structure */
785 if (sess_priv) {
786 memset(sess_priv, 0, sizeof(struct aesni_mb_session));
787 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
788 set_sym_session_private_data(sess, index, NULL);
789 rte_mempool_put(sess_mp, sess_priv);
790 }
791 }
792
/**
 * Cryptodev operations table registered for the AESNI-MB PMD.
 * Wires the callbacks above into the generic cryptodev framework.
 */
struct rte_cryptodev_ops aesni_mb_pmd_ops = {
		/* Device lifecycle */
		.dev_configure		= aesni_mb_pmd_config,
		.dev_start		= aesni_mb_pmd_start,
		.dev_stop		= aesni_mb_pmd_stop,
		.dev_close		= aesni_mb_pmd_close,

		/* Statistics */
		.stats_get		= aesni_mb_pmd_stats_get,
		.stats_reset		= aesni_mb_pmd_stats_reset,

		.dev_infos_get		= aesni_mb_pmd_info_get,

		/* Queue-pair management */
		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
		.queue_pair_release	= aesni_mb_pmd_qp_release,
		.queue_pair_count	= aesni_mb_pmd_qp_count,

		/* Session management */
		.sym_session_get_size	= aesni_mb_pmd_sym_session_get_size,
		.sym_session_configure	= aesni_mb_pmd_sym_session_configure,
		.sym_session_clear	= aesni_mb_pmd_sym_session_clear
};

/* Exported ops pointer used by the PMD registration code. */
struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;