1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2017 Intel Corporation
7 #include <rte_string_fns.h>
8 #include <rte_common.h>
9 #include <rte_malloc.h>
10 #include <rte_cryptodev_pmd.h>
12 #include "rte_aesni_mb_pmd_private.h"
/*
 * Capability table advertised to applications via
 * aesni_mb_pmd_info_get(). Each entry names a symmetric op type, a
 * transform type (AUTH / CIPHER / AEAD) and an algorithm enum.
 * NOTE(review): the per-entry size/range members (key, digest, IV,
 * AAD sizes) are elided in this view of the file — confirm against
 * the full source before editing any entry.
 */
15 static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities
[] = {
/* MD5 HMAC auth */
17 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
19 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
21 .algo
= RTE_CRYPTO_AUTH_MD5_HMAC
,
/* SHA1 HMAC auth */
38 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
40 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
42 .algo
= RTE_CRYPTO_AUTH_SHA1_HMAC
,
/* Plain SHA1 auth */
59 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
61 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
63 .algo
= RTE_CRYPTO_AUTH_SHA1
,
/* SHA224 HMAC auth */
80 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
82 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
84 .algo
= RTE_CRYPTO_AUTH_SHA224_HMAC
,
/* Plain SHA224 auth */
101 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
103 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
105 .algo
= RTE_CRYPTO_AUTH_SHA224
,
/* SHA256 HMAC auth */
122 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
124 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
126 .algo
= RTE_CRYPTO_AUTH_SHA256_HMAC
,
/* Plain SHA256 auth */
143 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
145 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
147 .algo
= RTE_CRYPTO_AUTH_SHA256
,
/* SHA384 HMAC auth */
164 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
166 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
168 .algo
= RTE_CRYPTO_AUTH_SHA384_HMAC
,
/* Plain SHA384 auth */
185 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
187 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
189 .algo
= RTE_CRYPTO_AUTH_SHA384
,
/* SHA512 HMAC auth */
206 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
208 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
210 .algo
= RTE_CRYPTO_AUTH_SHA512_HMAC
,
/* Plain SHA512 auth */
227 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
229 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
231 .algo
= RTE_CRYPTO_AUTH_SHA512
,
247 { /* AES XCBC HMAC */
248 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
250 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
252 .algo
= RTE_CRYPTO_AUTH_AES_XCBC_MAC
,
/* AES CBC cipher */
269 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
271 .xform_type
= RTE_CRYPTO_SYM_XFORM_CIPHER
,
273 .algo
= RTE_CRYPTO_CIPHER_AES_CBC
,
/* AES CTR cipher */
289 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
291 .xform_type
= RTE_CRYPTO_SYM_XFORM_CIPHER
,
293 .algo
= RTE_CRYPTO_CIPHER_AES_CTR
,
308 { /* AES DOCSIS BPI */
309 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
311 .xform_type
= RTE_CRYPTO_SYM_XFORM_CIPHER
,
313 .algo
= RTE_CRYPTO_CIPHER_AES_DOCSISBPI
,
/* DES CBC cipher */
329 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
331 .xform_type
= RTE_CRYPTO_SYM_XFORM_CIPHER
,
333 .algo
= RTE_CRYPTO_CIPHER_DES_CBC
,
/* 3DES CBC cipher */
349 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
351 .xform_type
= RTE_CRYPTO_SYM_XFORM_CIPHER
,
353 .algo
= RTE_CRYPTO_CIPHER_3DES_CBC
,
368 { /* DES DOCSIS BPI */
369 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
371 .xform_type
= RTE_CRYPTO_SYM_XFORM_CIPHER
,
373 .algo
= RTE_CRYPTO_CIPHER_DES_DOCSISBPI
,
/* AES CCM AEAD */
389 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
391 .xform_type
= RTE_CRYPTO_SYM_XFORM_AEAD
,
393 .algo
= RTE_CRYPTO_AEAD_AES_CCM
,
/* AES CMAC auth */
419 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
421 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
423 .algo
= RTE_CRYPTO_AUTH_AES_CMAC
,
/* AES GCM AEAD */
440 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
442 .xform_type
= RTE_CRYPTO_SYM_XFORM_AEAD
,
444 .algo
= RTE_CRYPTO_AEAD_AES_GCM
,
469 { /* AES GMAC (AUTH) */
470 .op
= RTE_CRYPTO_OP_TYPE_SYMMETRIC
,
472 .xform_type
= RTE_CRYPTO_SYM_XFORM_AUTH
,
474 .algo
= RTE_CRYPTO_AUTH_AES_GMAC
,
/* Mandatory terminator entry for the capability list */
494 RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
498 /** Configure device */
/*
 * cryptodev_ops.dev_configure hook. Both parameters are marked
 * __rte_unused, so the visible signature implies no configuration
 * work is done here. NOTE(review): function body is elided in this
 * view — confirm it simply returns success.
 */
500 aesni_mb_pmd_config(__rte_unused
struct rte_cryptodev
*dev
,
501 __rte_unused
struct rte_cryptodev_config
*config
)
/*
 * cryptodev_ops.dev_start hook. The device argument is unused, so no
 * start-up work is visible here. NOTE(review): body elided in this
 * view — confirm it is a no-op returning success.
 */
508 aesni_mb_pmd_start(__rte_unused
struct rte_cryptodev
*dev
)
/*
 * cryptodev_ops.dev_stop hook. The device argument is unused, so no
 * teardown work is visible here. NOTE(review): body elided in this
 * view — confirm it is a no-op.
 */
515 aesni_mb_pmd_stop(__rte_unused
struct rte_cryptodev
*dev
)
/*
 * cryptodev_ops.dev_close hook. The device argument is unused, so no
 * resource release is visible here. NOTE(review): body elided in this
 * view — confirm it simply returns success.
 */
521 aesni_mb_pmd_close(__rte_unused
struct rte_cryptodev
*dev
)
527 /** Get device statistics */
/*
 * cryptodev_ops.stats_get hook: accumulates per-queue-pair counters
 * from every configured queue pair into the caller-supplied *stats.
 * NOTE(review): the declaration of qp_id and any zeroing of *stats
 * are elided in this view — confirm stats is initialized before the
 * += accumulation below.
 */
529 aesni_mb_pmd_stats_get(struct rte_cryptodev
*dev
,
530 struct rte_cryptodev_stats
*stats
)
/* Walk all queue pairs belonging to this device. */
534 for (qp_id
= 0; qp_id
< dev
->data
->nb_queue_pairs
; qp_id
++) {
535 struct aesni_mb_qp
*qp
= dev
->data
->queue_pairs
[qp_id
];
/* Sum successful enqueue/dequeue counts. */
537 stats
->enqueued_count
+= qp
->stats
.enqueued_count
;
538 stats
->dequeued_count
+= qp
->stats
.dequeued_count
;
/* Sum enqueue/dequeue error counts. */
540 stats
->enqueue_err_count
+= qp
->stats
.enqueue_err_count
;
541 stats
->dequeue_err_count
+= qp
->stats
.dequeue_err_count
;
545 /** Reset device statistics */
/*
 * cryptodev_ops.stats_reset hook: zeroes the stats structure of every
 * configured queue pair on the device.
 */
547 aesni_mb_pmd_stats_reset(struct rte_cryptodev
*dev
)
/* Walk all queue pairs belonging to this device. */
551 for (qp_id
= 0; qp_id
< dev
->data
->nb_queue_pairs
; qp_id
++) {
552 struct aesni_mb_qp
*qp
= dev
->data
->queue_pairs
[qp_id
];
/* Clear the whole per-qp counter block in one memset. */
554 memset(&qp
->stats
, 0, sizeof(qp
->stats
));
559 /** Get device info */
/*
 * cryptodev_ops.dev_infos_get hook: fills *dev_info with the driver
 * id, feature flags, the static capability table above, and the
 * device's max queue pair count. Tolerates a NULL dev_info (no-op).
 */
561 aesni_mb_pmd_info_get(struct rte_cryptodev
*dev
,
562 struct rte_cryptodev_info
*dev_info
)
/* Per-device private state holds the configured qp limit. */
564 struct aesni_mb_private
*internals
= dev
->data
->dev_private
;
566 if (dev_info
!= NULL
) {
567 dev_info
->driver_id
= dev
->driver_id
;
568 dev_info
->feature_flags
= dev
->feature_flags
;
/* Advertise the static capability table defined above. */
569 dev_info
->capabilities
= aesni_mb_pmd_capabilities
;
570 dev_info
->max_nb_queue_pairs
= internals
->max_nb_queue_pairs
;
571 /* No limit of number of sessions */
572 dev_info
->sym
.max_nb_sessions
= 0;
576 /** Release queue pair */
/*
 * cryptodev_ops.queue_pair_release hook: frees the queue pair's ring,
 * multi-buffer manager and qp structure, then clears the slot.
 * NOTE(review): the NULL-qp guard and the rte_ring_free/rte_free
 * calls are elided in this view — confirm the full cleanup sequence
 * before modifying.
 */
578 aesni_mb_pmd_qp_release(struct rte_cryptodev
*dev
, uint16_t qp_id
)
580 struct aesni_mb_qp
*qp
= dev
->data
->queue_pairs
[qp_id
];
581 struct rte_ring
*r
= NULL
;
/* Look the qp's ring up by name so it can be released. */
584 r
= rte_ring_lookup(qp
->name
);
/* Release the intel-ipsec-mb manager owned by this qp. */
588 free_mb_mgr(qp
->mb_mgr
);
/* Clear the device slot so the qp cannot be used again. */
590 dev
->data
->queue_pairs
[qp_id
] = NULL
;
595 /** set a unique name for the queue pair based on it's name, dev_id and qp_id */
/*
 * Formats "aesni_mb_pmd_<dev_id>_qp_<qp_id>" into qp->name. The
 * snprintf return value is checked against the buffer size to detect
 * truncation. NOTE(review): the error/success return statements are
 * elided in this view.
 */
597 aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev
*dev
,
598 struct aesni_mb_qp
*qp
)
600 unsigned n
= snprintf(qp
->name
, sizeof(qp
->name
),
601 "aesni_mb_pmd_%u_qp_%u",
602 dev
->data
->dev_id
, qp
->id
);
/* n >= buffer size means the name was truncated — treat as error. */
604 if (n
>= sizeof(qp
->name
))
610 /** Create a ring to place processed operations on */
/*
 * Returns a single-producer/single-consumer rte_ring for the qp's
 * processed ops: reuses an existing ring of the same name when it is
 * large enough, otherwise creates a new one. NOTE(review): the
 * NULL-check on the lookup result and the return statements inside
 * the reuse branch are elided in this view.
 */
611 static struct rte_ring
*
612 aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp
*qp
,
613 unsigned int ring_size
, int socket_id
)
616 char ring_name
[RTE_CRYPTODEV_NAME_MAX_LEN
];
/* Ring is named after the qp; strlcpy bounds the copy. */
618 unsigned int n
= strlcpy(ring_name
, qp
->name
, sizeof(ring_name
));
/* Truncated name — cannot safely create/look up the ring. */
620 if (n
>= sizeof(ring_name
))
/* Reuse a pre-existing ring when one with this name exists. */
623 r
= rte_ring_lookup(ring_name
);
625 if (rte_ring_get_size(r
) >= ring_size
) {
626 AESNI_MB_LOG(INFO
, "Reusing existing ring %s for processed ops",
/* Existing ring is too small to reuse. */
631 AESNI_MB_LOG(ERR
, "Unable to reuse existing ring %s for processed ops",
/* No existing ring: create a fresh SP/SC ring on the given socket. */
636 return rte_ring_create(ring_name
, ring_size
, socket_id
,
637 RING_F_SP_ENQ
| RING_F_SC_DEQ
);
640 /** Setup a queue pair */
/*
 * cryptodev_ops.queue_pair_setup hook: (re)allocates the qp
 * structure, names it, allocates and initializes an intel-ipsec-mb
 * manager matched to the CPU vector mode, creates the ingress ring
 * and wires up the session mempools. NOTE(review): the socket_id
 * parameter, switch `break`s, error returns and the
 * qp_setup_cleanup label are elided in this view — verify the full
 * error-path ordering before changing anything here.
 */
642 aesni_mb_pmd_qp_setup(struct rte_cryptodev
*dev
, uint16_t qp_id
,
643 const struct rte_cryptodev_qp_conf
*qp_conf
,
646 struct aesni_mb_qp
*qp
= NULL
;
647 struct aesni_mb_private
*internals
= dev
->data
->dev_private
;
650 /* Free memory prior to re-allocation if needed. */
651 if (dev
->data
->queue_pairs
[qp_id
] != NULL
)
652 aesni_mb_pmd_qp_release(dev
, qp_id
);
654 /* Allocate the queue pair data structure. */
655 qp
= rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp
),
656 RTE_CACHE_LINE_SIZE
, socket_id
);
/* Publish the qp in the device slot before further init. */
661 dev
->data
->queue_pairs
[qp_id
] = qp
;
/* Name must be unique per device/qp; bail out on truncation. */
663 if (aesni_mb_pmd_qp_set_unique_name(dev
, qp
))
664 goto qp_setup_cleanup
;
/* Allocate the per-qp intel-ipsec-mb manager. */
667 qp
->mb_mgr
= alloc_mb_mgr(0);
668 if (qp
->mb_mgr
== NULL
) {
670 goto qp_setup_cleanup
;
/*
 * Initialize the mb manager for the CPU vector width detected at
 * probe time and advertise the matching feature flag.
 */
673 switch (internals
->vector_mode
) {
674 case RTE_AESNI_MB_SSE
:
675 dev
->feature_flags
|= RTE_CRYPTODEV_FF_CPU_SSE
;
676 init_mb_mgr_sse(qp
->mb_mgr
);
678 case RTE_AESNI_MB_AVX
:
679 dev
->feature_flags
|= RTE_CRYPTODEV_FF_CPU_AVX
;
680 init_mb_mgr_avx(qp
->mb_mgr
);
682 case RTE_AESNI_MB_AVX2
:
683 dev
->feature_flags
|= RTE_CRYPTODEV_FF_CPU_AVX2
;
684 init_mb_mgr_avx2(qp
->mb_mgr
);
686 case RTE_AESNI_MB_AVX512
:
687 dev
->feature_flags
|= RTE_CRYPTODEV_FF_CPU_AVX512
;
688 init_mb_mgr_avx512(qp
->mb_mgr
);
/* Unknown vector mode: log and unwind. */
691 AESNI_MB_LOG(ERR
, "Unsupported vector mode %u\n",
692 internals
->vector_mode
);
693 goto qp_setup_cleanup
;
/* Ring that carries processed ops back to the application. */
696 qp
->ingress_queue
= aesni_mb_pmd_qp_create_processed_ops_ring(qp
,
697 qp_conf
->nb_descriptors
, socket_id
);
698 if (qp
->ingress_queue
== NULL
) {
700 goto qp_setup_cleanup
;
/* Session mempools supplied by the application via qp_conf. */
703 qp
->sess_mp
= qp_conf
->mp_session
;
704 qp
->sess_mp_priv
= qp_conf
->mp_session_private
;
/* Start with clean statistics for this qp. */
706 memset(&qp
->stats
, 0, sizeof(qp
->stats
));
708 char mp_name
[RTE_MEMPOOL_NAMESIZE
];
/* Per-device/qp name for the temporary digest mempool. */
710 snprintf(mp_name
, RTE_MEMPOOL_NAMESIZE
,
711 "digest_mp_%u_%u", dev
->data
->dev_id
, qp_id
);
/* Cleanup path: release the mb manager allocated above. */
717 free_mb_mgr(qp
->mb_mgr
);
724 /** Return the number of allocated queue pairs */
/*
 * cryptodev_ops.queue_pair_count hook: simply reports the device's
 * configured queue pair count.
 */
726 aesni_mb_pmd_qp_count(struct rte_cryptodev
*dev
)
728 return dev
->data
->nb_queue_pairs
;
731 /** Returns the size of the aesni multi-buffer session structure */
/*
 * cryptodev_ops.sym_session_get_size hook: the framework uses this to
 * size the private-data mempool elements for this driver.
 */
733 aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev
*dev __rte_unused
)
735 return sizeof(struct aesni_mb_session
);
738 /** Configure a aesni multi-buffer session from a crypto xform chain */
/*
 * cryptodev_ops.sym_session_configure hook: takes a private-data
 * object from the mempool, parses the xform chain into it, and
 * attaches it to the generic session under this driver's id. On
 * parse failure the object is returned to the mempool.
 * NOTE(review): the error-code return statements and the declaration
 * of `ret` are elided in this view.
 */
740 aesni_mb_pmd_sym_session_configure(struct rte_cryptodev
*dev
,
741 struct rte_crypto_sym_xform
*xform
,
742 struct rte_cryptodev_sym_session
*sess
,
743 struct rte_mempool
*mempool
)
745 void *sess_private_data
;
746 struct aesni_mb_private
*internals
= dev
->data
->dev_private
;
/* Defensive check: the framework should never pass a NULL session. */
749 if (unlikely(sess
== NULL
)) {
750 AESNI_MB_LOG(ERR
, "invalid session struct");
/* Obtain driver-private storage for this session. */
754 if (rte_mempool_get(mempool
, &sess_private_data
)) {
756 "Couldn't get object from session mempool");
/* Translate the xform chain into the private session layout. */
760 ret
= aesni_mb_set_session_parameters(internals
->mb_mgr
,
761 sess_private_data
, xform
);
763 AESNI_MB_LOG(ERR
, "failed configure session parameters");
765 /* Return session to mempool */
766 rte_mempool_put(mempool
, sess_private_data
);
/* Success: attach private data to the session for this driver id. */
770 set_sym_session_private_data(sess
, dev
->driver_id
,
776 /** Clear the memory of session so it doesn't leave key material behind */
/*
 * cryptodev_ops.sym_session_clear hook: zeroes the driver-private
 * session data (it holds key material), detaches it from the generic
 * session and returns it to its originating mempool.
 * NOTE(review): the NULL check on sess_priv is elided in this view —
 * confirm the memset/put sequence is guarded. Plain memset of secrets
 * before release can be optimized away in general C code; here the
 * buffer outlives the call via the mempool, which limits that risk.
 */
778 aesni_mb_pmd_sym_session_clear(struct rte_cryptodev
*dev
,
779 struct rte_cryptodev_sym_session
*sess
)
781 uint8_t index
= dev
->driver_id
;
782 void *sess_priv
= get_sym_session_private_data(sess
, index
);
784 /* Zero out the whole structure */
786 memset(sess_priv
, 0, sizeof(struct aesni_mb_session
));
/* Recover the owning mempool from the object itself. */
787 struct rte_mempool
*sess_mp
= rte_mempool_from_obj(sess_priv
);
/* Detach, then give the private object back to its pool. */
788 set_sym_session_private_data(sess
, index
, NULL
);
789 rte_mempool_put(sess_mp
, sess_priv
);
/*
 * Driver operations table: maps the generic cryptodev callbacks to
 * the static implementations above. Exported to the probe/device
 * code via the rte_aesni_mb_pmd_ops pointer below.
 */
793 struct rte_cryptodev_ops aesni_mb_pmd_ops
= {
794 .dev_configure
= aesni_mb_pmd_config
,
795 .dev_start
= aesni_mb_pmd_start
,
796 .dev_stop
= aesni_mb_pmd_stop
,
797 .dev_close
= aesni_mb_pmd_close
,
799 .stats_get
= aesni_mb_pmd_stats_get
,
800 .stats_reset
= aesni_mb_pmd_stats_reset
,
802 .dev_infos_get
= aesni_mb_pmd_info_get
,
804 .queue_pair_setup
= aesni_mb_pmd_qp_setup
,
805 .queue_pair_release
= aesni_mb_pmd_qp_release
,
806 .queue_pair_count
= aesni_mb_pmd_qp_count
,
808 .sym_session_get_size
= aesni_mb_pmd_sym_session_get_size
,
809 .sym_session_configure
= aesni_mb_pmd_sym_session_configure
,
810 .sym_session_clear
= aesni_mb_pmd_sym_session_clear
/* Exported handle used when registering the device. */
813 struct rte_cryptodev_ops
*rte_aesni_mb_pmd_ops
= &aesni_mb_pmd_ops
;