/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>

#include "qat_logs.h"
#include "qat_algs.h"
#include "qat_crypto.h"
#include "adf_transport_access_macros.h"

static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				/* ... */
			}, }
		}, }
	},
	{	/* SHA224 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
				/* ... */
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				/* ... */
			}, }
		}, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
				/* ... */
			}, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				/* ... */
			}, }
		}, }
	},
	{	/* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
				/* ... */
			}, }
		}, }
	},
	{	/* AES XCBC MAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				/* ... */
			}, }
		}, }
	},
	{	/* AES GCM (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GCM,
				/* ... */
			}, }
		}, }
	},
	{	/* AES GMAC (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
				/* ... */
			}, }
		}, }
	},
	{	/* SNOW 3G (UIA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
				/* ... */
			}, }
		}, }
	},
	{	/* AES GCM (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
				/* ... */
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				/* ... */
			}, }
		}, }
	},
	{	/* SNOW 3G (UEA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
				/* ... */
			}, }
		}, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
				/* ... */
			}, }
		}, }
	},
	{	/* NULL (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_NULL,
				/* ... */
			}, }
		}, }
	},
	{	/* NULL (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_NULL,
				/* ... */
			}, }
		}, }
	},
	{	/* KASUMI (F8) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
				/* ... */
			}, }
		}, }
	},
	{	/* KASUMI (F9) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
				/* ... */
			}, }
		}, }
	},
	{	/* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
				/* ... */
			}, }
		}, }
	},
	{	/* 3DES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_3DES_CTR,
				/* ... */
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
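
/*
 * This table is what qat_dev_info_get() below hands back through
 * info->capabilities, so an application can discover the algorithms
 * supported here via rte_cryptodev_info_get() before building xforms.
 */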

static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);

void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
		void *session)
{
	struct qat_session *sess = session;
	phys_addr_t cd_paddr;

	PMD_INIT_FUNC_TRACE();
	if (sess != NULL) {
		cd_paddr = sess->cd_paddr;
		memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
		sess->cd_paddr = cd_paddr;
	} else
		PMD_DRV_LOG(ERR, "NULL session");
}

static int
qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
{
	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_CIPHER;

	/* Authentication Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return ICP_QAT_FW_LA_CMD_AUTH;

	if (xform->next == NULL)
		return -1;

	/* Cipher then Authenticate */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;

	/* Authenticate then Cipher */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;

	return -1;
}
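
/*
 * Illustrative mapping, derived from the checks above: a lone cipher
 * xform (xform->next == NULL) selects ICP_QAT_FW_LA_CMD_CIPHER; a lone
 * auth xform selects ICP_QAT_FW_LA_CMD_AUTH; a cipher xform chained to
 * an auth xform selects ICP_QAT_FW_LA_CMD_CIPHER_HASH (cipher first,
 * then hash), and the reverse chain selects ICP_QAT_FW_LA_CMD_HASH_CIPHER.
 * Any other combination falls through to the -1 "unsupported" return.
 */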

static struct rte_crypto_auth_xform *
qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return &xform->auth;

		xform = xform->next;
	} while (xform);

	return NULL;
}

static struct rte_crypto_cipher_xform *
qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
{
	do {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return &xform->cipher;

		xform = xform->next;
	} while (xform);

	return NULL;
}

void *
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	struct qat_session *session = session_private;

	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	/* Get cipher xform from crypto xform chain */
	cipher_xform = qat_get_cipher_xform(xform);

	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_GCM:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		if (qat_alg_validate_aes_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
		break;
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
					&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CTR:
		if (qat_alg_validate_3des_key(cipher_xform->key.length,
				&session->qat_cipher_alg) != 0) {
			PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
			goto error_out;
		}
		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
		break;
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_AES_CCM:
	case RTE_CRYPTO_CIPHER_AES_F8:
	case RTE_CRYPTO_CIPHER_AES_XTS:
	case RTE_CRYPTO_CIPHER_ARC4:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
				cipher_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
				cipher_xform->algo);
		goto error_out;
	}

	if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
	else
		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;

	if (qat_alg_aead_session_create_content_desc_cipher(session,
						cipher_xform->key.data,
						cipher_xform->key.length))
		goto error_out;

	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}
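
/*
 * Minimal caller-side sketch (illustrative only; the key pointer and
 * length below are placeholders, not values from this driver): an
 * application fills an rte_crypto_sym_xform before creating a session,
 * e.g.
 *
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key_data, .length = 16 },
 *		},
 *		.next = NULL,
 *	};
 *
 * A 16/24/32 byte key passes qat_alg_validate_aes_key() above; any
 * other length is rejected with "Invalid AES cipher key size".
 */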

void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform, void *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	struct qat_session *session = session_private;

	int qat_cmd_id;

	PMD_INIT_FUNC_TRACE();

	/* Get requested QAT command id */
	qat_cmd_id = qat_get_cmd_id(xform);
	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
		goto error_out;
	}
	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
	switch (session->qat_cmd) {
	case ICP_QAT_FW_LA_CMD_CIPHER:
		session = qat_crypto_sym_configure_session_cipher(dev,
				xform, session);
		break;
	case ICP_QAT_FW_LA_CMD_AUTH:
		session = qat_crypto_sym_configure_session_auth(dev,
				xform, session);
		break;
	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
		session = qat_crypto_sym_configure_session_cipher(dev,
				xform, session);
		session = qat_crypto_sym_configure_session_auth(dev,
				xform, session);
		break;
	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
		session = qat_crypto_sym_configure_session_auth(dev,
				xform, session);
		session = qat_crypto_sym_configure_session_cipher(dev,
				xform, session);
		break;
	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
	case ICP_QAT_FW_LA_CMD_MGF1:
	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
	case ICP_QAT_FW_LA_CMD_DELIMITER:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Unsupported Service %u",
				session->qat_cmd);
		goto error_out;
	}

	return session;

error_out:
	rte_mempool_put(internals->sess_mp, session);
	return NULL;
}
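
/*
 * For the chained commands the cipher and auth halves are configured
 * back to back above: CIPHER_HASH runs the cipher configuration first,
 * HASH_CIPHER the auth configuration first, mirroring the order in
 * which the firmware applies the two operations.
 */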

struct qat_session *
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
				struct rte_crypto_sym_xform *xform,
				struct qat_session *session_private)
{
	struct qat_pmd_private *internals = dev->data->dev_private;
	struct qat_session *session = session_private;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;

	auth_xform = qat_get_auth_xform(xform);

	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
		break;
	case RTE_CRYPTO_AUTH_AES_GCM:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AUTH_AES_GMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
		break;
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
		break;
	case RTE_CRYPTO_AUTH_KASUMI_F9:
		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
		break;
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_CCM:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
				auth_xform->algo);
		goto error_out;
	default:
		PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
				auth_xform->algo);
		goto error_out;
	}
	cipher_xform = qat_get_cipher_xform(xform);

	if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
			(session->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		/* For GCM/GMAC the Galois hash key comes from the cipher
		 * xform rather than the auth xform.
		 */
		if (qat_alg_aead_session_create_content_desc_auth(session,
				cipher_xform->key.data,
				cipher_xform->key.length,
				auth_xform->add_auth_data_length,
				auth_xform->digest_length,
				auth_xform->op))
			goto error_out;
	} else {
		if (qat_alg_aead_session_create_content_desc_auth(session,
				auth_xform->key.data,
				auth_xform->key.length,
				auth_xform->add_auth_data_length,
				auth_xform->digest_length,
				auth_xform->op))
			goto error_out;
	}
	return session;

error_out:
	if (internals->sess_mp != NULL)
		rte_mempool_put(internals->sess_mp, session);
	return NULL;
}

unsigned qat_crypto_sym_get_session_private_size(
		struct rte_cryptodev *dev __rte_unused)
{
	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
}
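
/*
 * RTE_ALIGN_CEIL(x, 8) rounds sizeof(struct qat_session) up to the
 * next multiple of 8, so consecutive session private areas carved out
 * of the mempool element stay 8-byte aligned.
 */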

uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	register struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	register uint32_t nb_ops_sent = 0;
	register struct rte_crypto_op **cur_op = ops;
	register int ret;
	uint16_t nb_ops_possible = nb_ops;
	register uint8_t *base_addr;
	register uint32_t tail;
	int overflow;

	if (unlikely(nb_ops == 0))
		return 0;

	/* read params used a lot in main loop into registers */
	queue = &(tmp_qp->tx_q);
	base_addr = (uint8_t *)queue->base_addr;
	tail = queue->tail;

	/* Find how many can actually fit on the ring */
	overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
				- queue->max_inflights;
	if (overflow > 0) {
		rte_atomic16_sub(&tmp_qp->inflights16, overflow);
		nb_ops_possible = nb_ops - overflow;
		if (nb_ops_possible == 0)
			return 0;
	}

	while (nb_ops_sent != nb_ops_possible) {
		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
		if (ret != 0) {
			tmp_qp->stats.enqueue_err_count++;
			if (nb_ops_sent == 0)
				return 0;
			goto kick_tail;
		}

		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
		nb_ops_sent++;
		cur_op++;
	}
kick_tail:
	WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
			queue->hw_queue_number, tail);
	queue->tail = tail;
	tmp_qp->stats.enqueued_count += nb_ops_sent;
	return nb_ops_sent;
}
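
/*
 * Worked example of the inflight accounting above (illustrative
 * numbers): with max_inflights = 128, inflights16 = 120 and a burst of
 * nb_ops = 16, rte_atomic16_add_return() yields 136 and overflow = 8;
 * the 8 excess slots are handed back with rte_atomic16_sub() and only
 * nb_ops_possible = 8 descriptors are written before the tail CSR is
 * kicked once for the whole burst.
 */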

uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t msg_counter = 0;
	struct rte_crypto_op *rx_op;
	struct icp_qat_fw_comn_resp *resp_msg;

	queue = &(tmp_qp->rx_q);
	resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			msg_counter != nb_ops) {
		rx_op = (struct rte_crypto_op *)(uintptr_t)
				(resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
				sizeof(struct icp_qat_fw_comn_resp));
#endif
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {
			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}
		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
		queue->head = adf_modulo(queue->head +
				queue->msg_size,
				ADF_RING_SIZE_MODULO(queue->queue_size));
		resp_msg = (struct icp_qat_fw_comn_resp *)
				((uint8_t *)queue->base_addr +
						queue->head);
		*ops = rx_op;
		ops++;
		msg_counter++;
	}
	if (msg_counter > 0) {
		WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
					queue->hw_bundle_number,
					queue->hw_queue_number, queue->head);
		rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
		tmp_qp->stats.dequeued_count += msg_counter;
	}
	return msg_counter;
}
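
/*
 * Ring-empty convention used above: the first 32 bits of a response
 * slot are overwritten with ADF_RING_EMPTY_SIG as soon as the slot is
 * consumed, so the next poll stops at the first slot the hardware has
 * not rewritten yet. The head CSR and inflights16 are updated once per
 * burst, after the loop, rather than per response.
 */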

static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
{
	struct qat_session *ctx;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	register struct icp_qat_fw_la_bulk_req *qat_req;
	uint8_t do_auth = 0, do_cipher = 0;
	uint32_t cipher_len = 0, cipher_ofs = 0;
	uint32_t auth_len = 0, auth_ofs = 0;
	uint32_t min_ofs = 0;
	uint32_t digest_appended = 1;
	uint64_t buf_start = 0;

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
				"operation requests, op (%p) is not a "
				"symmetric operation.", op);
		return -EINVAL;
	}
#endif
	if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
		PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
				" requests, op (%p) is sessionless.", op);
		return -EINVAL;
	}

	if (unlikely(op->sym->session->dev_type !=
			RTE_CRYPTODEV_QAT_SYM_PMD)) {
		PMD_DRV_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	ctx = (struct qat_session *)op->sym->session->_private;
	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
	cipher_param = (void *)&qat_req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		do_auth = 1;
		do_cipher = 1;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		do_auth = 1;
		do_cipher = 0;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		do_auth = 0;
		do_cipher = 1;
	}

	if (do_cipher) {

		if (ctx->qat_cipher_alg ==
					ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {

			if (unlikely(
			    (cipher_param->cipher_length % BYTE_LENGTH != 0)
				|| (cipher_param->cipher_offset
					% BYTE_LENGTH != 0))) {
				PMD_DRV_LOG(ERR,
		"SNOW3G/KASUMI in QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			cipher_len = op->sym->cipher.data.length >> 3;
			cipher_ofs = op->sym->cipher.data.offset >> 3;
		} else {
			cipher_len = op->sym->cipher.data.length;
			cipher_ofs = op->sym->cipher.data.offset;
		}

		/* copy IV into request if it fits */
		if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
				sizeof(cipher_param->u.cipher_IV_array))) {
			rte_memcpy(cipher_param->u.cipher_IV_array,
					op->sym->cipher.iv.data,
					op->sym->cipher.iv.length);
		} else {
			ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
					qat_req->comn_hdr.serv_specif_flags,
					ICP_QAT_FW_CIPH_IV_64BIT_PTR);
			cipher_param->u.s.cipher_IV_ptr =
					op->sym->cipher.iv.phys_addr;
		}
		min_ofs = cipher_ofs;
	}

	if (do_auth) {

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
			if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
				|| (auth_param->auth_len % BYTE_LENGTH != 0))) {
				PMD_DRV_LOG(ERR,
		"For SNOW3G/KASUMI, QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			auth_ofs = op->sym->auth.data.offset >> 3;
			auth_len = op->sym->auth.data.length >> 3;

			if (ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
				if (do_cipher) {
					auth_len = auth_len + auth_ofs + 1 -
						ICP_QAT_HW_KASUMI_BLK_SZ;
					auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
				} else {
					auth_len = auth_len + auth_ofs + 1;
					auth_ofs = 0;
				}
			}
		} else {
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;
		}
		min_ofs = auth_ofs;

		if (op->sym->auth.digest.phys_addr) {
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
					qat_req->comn_hdr.serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
			auth_param->auth_res_addr =
					op->sym->auth.digest.phys_addr;
			digest_appended = 0;
		}

		auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
	}

	/* adjust for chain case */
	if (do_cipher && do_auth)
		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;

	/* Start DMA at nearest aligned address below min_ofs */
#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
	buf_start = rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs) &
							QAT_64_BTYE_ALIGN_MASK;

	if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src)
			- rte_pktmbuf_headroom(op->sym->m_src)) > buf_start)) {
		/* alignment has pushed addr ahead of start of mbuf
		 * so revert and take the performance hit
		 */
		buf_start = rte_pktmbuf_mtophys(op->sym->m_src);
	}

	qat_req->comn_mid.dest_data_addr =
		qat_req->comn_mid.src_data_addr = buf_start;

	if (do_cipher) {
		cipher_param->cipher_offset =
				(uint32_t)rte_pktmbuf_mtophys_offset(
					op->sym->m_src, cipher_ofs) - buf_start;
		cipher_param->cipher_length = cipher_len;
	} else {
		cipher_param->cipher_offset = 0;
		cipher_param->cipher_length = 0;
	}
	if (do_auth) {
		auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
				op->sym->m_src, auth_ofs) - buf_start;
		auth_param->auth_len = auth_len;
	} else {
		auth_param->auth_off = 0;
		auth_param->auth_len = 0;
	}
	qat_req->comn_mid.dst_length =
		qat_req->comn_mid.src_length =
		(cipher_param->cipher_offset + cipher_param->cipher_length)
		> (auth_param->auth_off + auth_param->auth_len) ?
		(cipher_param->cipher_offset + cipher_param->cipher_length)
		: (auth_param->auth_off + auth_param->auth_len);

	if (do_auth && digest_appended) {
		if (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE)
			qat_req->comn_mid.dst_length
					+= op->sym->auth.digest.length;
		else
			qat_req->comn_mid.src_length
					+= op->sym->auth.digest.length;
	}

	/* out-of-place operation (OOP) */
	if (unlikely(op->sym->m_dst != NULL)) {

		if (do_auth)
			qat_req->comn_mid.dest_data_addr =
				rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						auth_ofs)
						- auth_param->auth_off;
		else
			qat_req->comn_mid.dest_data_addr =
				rte_pktmbuf_mtophys_offset(op->sym->m_dst,
						cipher_ofs)
						- cipher_param->cipher_offset;
	}

	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
		if (op->sym->cipher.iv.length == 12) {
			/*
			 * For GCM a 12 byte IV is allowed,
			 * but we need to inform the f/w
			 */
			ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
		}
		if (op->sym->cipher.data.length == 0) {
			/* GMAC: hash the AAD only, no cipher payload */
			qat_req->comn_mid.dest_data_addr =
				qat_req->comn_mid.src_data_addr =
						op->sym->auth.aad.phys_addr;
			qat_req->comn_mid.dst_length =
				qat_req->comn_mid.src_length =
					rte_pktmbuf_data_len(op->sym->m_src);
			cipher_param->cipher_length = 0;
			cipher_param->cipher_offset = 0;
			auth_param->u1.aad_adr = 0;
			auth_param->auth_len = op->sym->auth.aad.length;
			auth_param->auth_off = op->sym->auth.data.offset;
			auth_param->u2.aad_sz = 0;
		}
	}

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
	rte_hexdump(stdout, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_la_bulk_req));
	rte_hexdump(stdout, "src_data:",
			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
			rte_pktmbuf_data_len(op->sym->m_src));
	rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
			op->sym->cipher.iv.length);
	rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
			op->sym->auth.digest.length);
	rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
			op->sym->auth.aad.length);
#endif
	return 0;
}
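
/*
 * Example of the QAT_64_BTYE_ALIGN_MASK arithmetic above (illustrative
 * address): for mbuf data at physical address 0x7f012345 and
 * min_ofs = 0, buf_start becomes 0x7f012340; the cipher/auth offsets
 * are then rebased against buf_start, so device DMA always starts on a
 * 64-byte boundary unless that would fall before the start of the
 * mbuf buffer.
 */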

static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
	uint32_t div = data >> shift;
	uint32_t mult = div << shift;

	return data - mult;
}
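
/*
 * adf_modulo(data, shift) computes data % (1 << shift) without a
 * divide: e.g. adf_modulo(300, 8) gives div = 1, mult = 256 and
 * returns 44. This is only correct because the ring sizes are powers
 * of two.
 */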

void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
{
	struct rte_cryptodev_sym_session *sess = sym_sess;
	struct qat_session *s = (void *)sess->_private;

	PMD_INIT_FUNC_TRACE();
	s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
		offsetof(struct qat_session, cd) +
		offsetof(struct rte_cryptodev_sym_session, _private);
}

int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();
}

int qat_dev_close(struct rte_cryptodev *dev)
{
	int i, ret;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = qat_crypto_sym_qp_release(dev, i);
		if (ret < 0)
			return ret;
	}

	return 0;
}

void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct qat_pmd_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs =
				ADF_NUM_SYM_QPS_PER_BUNDLE *
				ADF_NUM_BUNDLES_PER_DEV;
		info->feature_flags = dev->feature_flags;
		info->capabilities = qat_pmd_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
	}
}

void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
			continue;
		}

		/* accumulate per-qp counters; note each stat must read its
		 * own counter (the original copied enqueued_count into the
		 * dequeue fields by mistake)
		 */
		stats->enqueued_count += qp[i]->stats.enqueued_count;
		stats->dequeued_count += qp[i]->stats.dequeued_count;
		stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
		stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
	}
}

void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();
	for (i = 0; i < dev->data->nb_queue_pairs; i++)
		memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
	PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
}