/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2018 Intel Corporation
 */

#include <openssl/evp.h>

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_crypto_sym.h>
#include <rte_bus_pci.h>
#include <rte_byteorder.h>

#include "qat_sym.h"
/** Decrypt a single partial block
 *  Depends on openssl libcrypto
 *  Uses ECB+XOR to do CFB encryption, same result, more performant
 */
static inline int
bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
		uint8_t *iv, int ivlen, int srclen,
		void *bpi_ctx)
{
	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
	int encrypted_ivlen;
	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
	uint8_t *encr = encrypted_iv;

	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
								<= 0)
		goto cipher_decrypt_err;

	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
		*dst = *src ^ *encr;

	return 0;

cipher_decrypt_err:
	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
	return -EINVAL;
}

static inline uint32_t
qat_bpicipher_preprocess(struct qat_sym_session *ctx,
				struct rte_crypto_op *op)
{
	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
	struct rte_crypto_sym_op *sym_op = op->sym;
	uint8_t last_block_len = block_len > 0 ?
			sym_op->cipher.data.length % block_len : 0;

	if (last_block_len &&
			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {

		/* Decrypt last block */
		uint8_t *last_block, *dst, *iv;
		uint32_t last_block_offset = sym_op->cipher.data.offset +
				sym_op->cipher.data.length - last_block_len;
		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
				uint8_t *, last_block_offset);

		if (unlikely(sym_op->m_dst != NULL))
			/* out-of-place operation (OOP) */
			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
					uint8_t *, last_block_offset);
		else
			dst = last_block;

		if (last_block_len < sym_op->cipher.data.length)
			/* use previous block ciphertext as IV */
			iv = last_block - block_len;
		else
			/* runt block, i.e. less than one full block */
			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
					ctx->cipher_iv.offset);

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
				last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG,
					"BPI: dst before pre-process:",
					dst, last_block_len);
#endif
		bpi_cipher_decrypt(last_block, dst, iv, block_len,
				last_block_len, ctx->bpi_ctx);
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
				last_block, last_block_len);
		if (sym_op->m_dst != NULL)
			QAT_DP_HEXDUMP_LOG(DEBUG,
					"BPI: dst after pre-process:",
					dst, last_block_len);
#endif
	}

	return sym_op->cipher.data.length - last_block_len;
}

static void
set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
		struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_op *op,
		struct icp_qat_fw_la_bulk_req *qat_req)
{
	/* copy IV into request if it fits */
	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
		rte_memcpy(cipher_param->u.cipher_IV_array,
				rte_crypto_op_ctod_offset(op, uint8_t *,
					iv_offset),
				iv_length);
	} else {
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
				qat_req->comn_hdr.serv_specif_flags,
				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		cipher_param->u.s.cipher_IV_ptr =
				rte_crypto_op_ctophys_offset(op,
					iv_offset);
	}
}

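/*
 * Design note: an IV small enough for the descriptor's cipher_IV_array
 * travels inline with the request, so the device fetches it together
 * with the descriptor; a larger IV is instead referenced through a
 * 64-bit physical pointer (ICP_QAT_FW_CIPH_IV_64BIT_PTR), costing the
 * device one extra DMA read but supporting arbitrary IV sizes.
 */
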
/** Set IV for CCM is a special case: the 0th byte is set to q-1,
 *  where q is the padding of the nonce in the 16-byte block
 */
static void
set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
		struct icp_qat_fw_la_cipher_req_params *cipher_param,
		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
{
	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
			ICP_QAT_HW_CCM_NONCE_OFFSET,
			rte_crypto_op_ctod_offset(op, uint8_t *,
				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
			iv_length);
	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
			q - ICP_QAT_HW_CCM_NONCE_OFFSET;

	if (aad_len_field_sz)
		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
			rte_crypto_op_ctod_offset(op, uint8_t *,
				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
			iv_length);
}

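/*
 * Per NIST SP 800-38C, CCM counter blocks begin with a flags octet
 * holding q-1, where q is the byte size of the message-length field
 * and n + q = 15 (n = nonce length). The flags byte written by
 * set_cipher_iv_ccm() is q - ICP_QAT_HW_CCM_NONCE_OFFSET, which equals
 * q-1 given the one-byte offset at which the nonce starts in the IV
 * region.
 */
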
int
qat_sym_build_request(void *in_op, uint8_t *out_msg,
		void *op_cookie, enum qat_device_gen qat_dev_gen)
{
	int ret = 0;
	struct qat_sym_session *ctx;
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	register struct icp_qat_fw_la_bulk_req *qat_req;
	uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
	uint32_t cipher_len = 0, cipher_ofs = 0;
	uint32_t auth_len = 0, auth_ofs = 0;
	uint32_t min_ofs = 0;
	uint64_t src_buf_start = 0, dst_buf_start = 0;
	uint8_t do_sgl = 0;
	uint8_t wireless_auth = 0, in_place = 1;
	struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
	struct qat_sym_op_cookie *cookie =
			(struct qat_sym_op_cookie *)op_cookie;

	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
				"operation requests, op (%p) is not a "
				"symmetric operation.", op);
		return -EINVAL;
	}

	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
				" requests, op (%p) is sessionless.", op);
		return -EINVAL;
	}

	ctx = (struct qat_sym_session *)get_sym_session_private_data(
			op->sym->session, cryptodev_qat_driver_id);

	if (unlikely(ctx == NULL)) {
		QAT_DP_LOG(ERR, "Session was not created for this device");
		return -EINVAL;
	}

	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
		return -EINVAL;
	}

	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
	cipher_param = (void *)&qat_req->serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

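	/*
	 * Layout note: the LA bulk request keeps its service-specific
	 * parameters in one contiguous area; the cipher request
	 * parameters sit at its start with the auth request parameters
	 * immediately after, which is all the pointer arithmetic above
	 * relies on.
	 */
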
	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* AES-GCM or AES-CCM */
		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
			&& ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
			do_aead = 1;
		} else {
			do_auth = 1;
			do_cipher = 1;
		}
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		do_auth = 1;
		do_cipher = 0;
	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		do_auth = 0;
		do_cipher = 1;
	}

	if (do_cipher) {

		if (ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
			ctx->qat_cipher_alg ==
				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {

			if (unlikely(
			    (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
			    (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
				QAT_DP_LOG(ERR,
	"SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			cipher_len = op->sym->cipher.data.length >> 3;
			cipher_ofs = op->sym->cipher.data.offset >> 3;
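
			/*
			 * The cryptodev API expresses SNOW3G/KASUMI/ZUC
			 * cipher offsets and lengths in bits; after the
			 * byte-alignment check above, the ">> 3" shifts
			 * convert them to the byte units the firmware
			 * expects (BYTE_LENGTH == 8).
			 */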
		} else if (ctx->bpi_ctx) {
			/* DOCSIS - only send complete blocks to device.
			 * Process any partial block using CFB mode.
			 * Even if 0 complete blocks, still send this to device
			 * to get into rx queue for post-process and dequeuing
			 */
			cipher_len = qat_bpicipher_preprocess(ctx, op);
			cipher_ofs = op->sym->cipher.data.offset;
		} else {
			cipher_len = op->sym->cipher.data.length;
			cipher_ofs = op->sym->cipher.data.offset;
		}

		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
				cipher_param, op, qat_req);
		min_ofs = cipher_ofs;
	}

	if (do_auth) {

		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
			ctx->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {

			if (unlikely(
			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
				QAT_DP_LOG(ERR,
	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
				return -EINVAL;
			}
			auth_ofs = op->sym->auth.data.offset >> 3;
			auth_len = op->sym->auth.data.length >> 3;
			wireless_auth = 1;

			auth_param->u1.aad_adr =
					rte_crypto_op_ctophys_offset(op,
							ctx->auth_iv.offset);

		} else if (ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
				ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
			/* AES-GMAC */
			set_cipher_iv(ctx->auth_iv.length,
				ctx->auth_iv.offset,
				cipher_param, op, qat_req);
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;

			auth_param->u1.aad_adr = 0;
			auth_param->u2.aad_sz = 0;

			/*
			 * If len(iv)==12B fw computes J0
			 */
			if (ctx->auth_iv.length == 12) {
				ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
					qat_req->comn_hdr.serv_specif_flags,
					ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
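				/*
				 * Background: with a 96-bit IV, GCM's
				 * pre-counter block is simply
				 * J0 = IV || 0^31 || 1 (NIST SP 800-38D),
				 * so the firmware can derive J0 from the
				 * IV alone; other IV lengths would need a
				 * GHASH pass to compute it.
				 */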
			}
		} else {
			auth_ofs = op->sym->auth.data.offset;
			auth_len = op->sym->auth.data.length;
		}
		min_ofs = auth_ofs;

		if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
			auth_param->auth_res_addr =
					op->sym->auth.digest.phys_addr;
	}

	if (do_aead) {
		/*
		 * This address may be used to set the AAD physical
		 * pointer from the IV offset in the op
		 */
		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
		if (ctx->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
				ctx->qat_hash_alg ==
					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
			/*
			 * If len(iv)==12B fw computes J0
			 */
			if (ctx->cipher_iv.length == 12) {
				ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
					qat_req->comn_hdr.serv_specif_flags,
					ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
			}
			set_cipher_iv(ctx->cipher_iv.length,
					ctx->cipher_iv.offset,
					cipher_param, op, qat_req);
		} else if (ctx->qat_hash_alg ==
				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {

			/* In case of AES-CCM this may point to user selected
			 * memory or the IV offset in the crypto_op
			 */
			uint8_t *aad_data = op->sym->aead.aad.data;
			/* This is the true AAD length; it does not include
			 * the 18 bytes of preceding data (B0 block and AAD
			 * length field)
			 */
			uint8_t aad_ccm_real_len = 0;
			uint8_t aad_len_field_sz = 0;
			uint32_t msg_len_be =
					rte_bswap32(op->sym->aead.data.length);

			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
				aad_ccm_real_len = ctx->aad_len -
					ICP_QAT_HW_CCM_AAD_B0_LEN -
					ICP_QAT_HW_CCM_AAD_LEN_INFO;
			} else {
				/*
				 * aad_len not greater than 18, so there is
				 * no actual aad data; use the IV after the
				 * op for the B0 block
				 */
				aad_data = rte_crypto_op_ctod_offset(op,
						uint8_t *,
						ctx->cipher_iv.offset);
				aad_phys_addr_aead =
					rte_crypto_op_ctophys_offset(op,
						ctx->cipher_iv.offset);
			}

			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
					ctx->cipher_iv.length;

			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
					aad_len_field_sz,
					ctx->digest_length, q);

			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
				memcpy(aad_data + ctx->cipher_iv.length +
				    ICP_QAT_HW_CCM_NONCE_OFFSET +
				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
				    (uint8_t *)&msg_len_be,
				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
			} else {
				memcpy(aad_data + ctx->cipher_iv.length +
				    ICP_QAT_HW_CCM_NONCE_OFFSET,
				    (uint8_t *)&msg_len_be
				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
				    - q), q);
			}

			if (aad_len_field_sz > 0) {
				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
						= rte_bswap16(aad_ccm_real_len);

				if ((aad_ccm_real_len + aad_len_field_sz)
						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
					uint8_t pad_len = 0;
					uint8_t pad_idx = 0;

					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
					    ((aad_ccm_real_len +
					    aad_len_field_sz) %
					    ICP_QAT_HW_CCM_AAD_B0_LEN);
					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
					    aad_ccm_real_len +
					    aad_len_field_sz;
					memset(&aad_data[pad_idx], 0, pad_len);
				}
			}

			set_cipher_iv_ccm(ctx->cipher_iv.length,
					ctx->cipher_iv.offset,
					cipher_param, op, q,
					aad_len_field_sz);
		}
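
		/*
		 * Worked CCM example (assuming the usual parameterisation
		 * where n + q = 15): with a 13-byte nonce, q = 2, so B0's
		 * flags octet above encodes q-1 = 1 together with the
		 * digest length and aad_len_field_sz, and only the low
		 * q = 2 bytes of the big-endian message length are copied
		 * into the tail of B0 by the second memcpy branch.
		 */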

		cipher_len = op->sym->aead.data.length;
		cipher_ofs = op->sym->aead.data.offset;
		auth_len = op->sym->aead.data.length;
		auth_ofs = op->sym->aead.data.offset;

		auth_param->u1.aad_adr = aad_phys_addr_aead;
		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
		min_ofs = op->sym->aead.data.offset;
	}

	if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
		do_sgl = 1;

	/* adjust for chain case */
	if (do_cipher && do_auth)
		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;

	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) &&
			do_sgl))
		min_ofs = 0;
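
	/*
	 * The reset above is a conservative fallback: if the smallest
	 * offset lies beyond the first segment of a chained mbuf, the
	 * flat-address arithmetic below (which only sees the first
	 * segment) cannot be applied to it; the SGL built later
	 * describes the full chain to the device in any case.
	 */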

	if (unlikely(op->sym->m_dst != NULL)) {
		/* Out-of-place operation (OOP)
		 * Don't align DMA start. DMA the minimum data-set
		 * so as not to overwrite data in dest buffer
		 */
		in_place = 0;
		src_buf_start =
			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
		dst_buf_start =
			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
	} else {
		/* In-place operation
		 * Start DMA at nearest aligned address below min_ofs
		 */
		src_buf_start =
			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
						& QAT_64_BTYE_ALIGN_MASK;

		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
				rte_pktmbuf_headroom(op->sym->m_src))
					> src_buf_start)) {
			/* alignment has pushed addr ahead of start of mbuf
			 * so revert and take the performance hit
			 */
			src_buf_start =
				rte_pktmbuf_iova_offset(op->sym->m_src,
							min_ofs);
		}
		dst_buf_start = src_buf_start;
	}
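
	/*
	 * Example of the in-place rounding above: if min_ofs lands the
	 * DMA start at IOVA 0x12345, masking with QAT_64_BTYE_ALIGN_MASK
	 * rounds it down to the containing 64-byte line at 0x12340; the
	 * cipher/auth offsets computed below are then made relative to
	 * that aligned start.
	 */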

	if (do_cipher || do_aead) {
		cipher_param->cipher_offset =
				(uint32_t)rte_pktmbuf_iova_offset(
				op->sym->m_src, cipher_ofs) - src_buf_start;
		cipher_param->cipher_length = cipher_len;
	} else {
		cipher_param->cipher_offset = 0;
		cipher_param->cipher_length = 0;
	}

	if (do_auth || do_aead) {
		auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
				op->sym->m_src, auth_ofs) - src_buf_start;
		auth_param->auth_len = auth_len;
	} else {
		auth_param->auth_off = 0;
		auth_param->auth_len = 0;
	}

	qat_req->comn_mid.dst_length =
		qat_req->comn_mid.src_length =
		(cipher_param->cipher_offset + cipher_param->cipher_length)
		> (auth_param->auth_off + auth_param->auth_len) ?
		(cipher_param->cipher_offset + cipher_param->cipher_length)
		: (auth_param->auth_off + auth_param->auth_len);
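
	/*
	 * i.e. src/dst length = max(end of cipher region, end of auth
	 * region), both relative to the DMA start. For example, a cipher
	 * region covering bytes [16, 80) with an auth region covering
	 * [0, 96) gives a src_length of 96.
	 */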

	if (do_sgl) {

		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);
		ret = qat_sgl_fill_array(op->sym->m_src,
			(int64_t)(src_buf_start -
				rte_pktmbuf_iova(op->sym->m_src)),
			&cookie->qat_sgl_src,
			qat_req->comn_mid.src_length,
			QAT_SYM_SGL_MAX_NUMBER);

		if (unlikely(ret)) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
			return ret;
		}

		if (likely(op->sym->m_dst == NULL))
			qat_req->comn_mid.dest_data_addr =
				qat_req->comn_mid.src_data_addr =
					cookie->qat_sgl_src_phys_addr;
		else {
			ret = qat_sgl_fill_array(op->sym->m_dst,
					(int64_t)(dst_buf_start -
					  rte_pktmbuf_iova(op->sym->m_dst)),
					&cookie->qat_sgl_dst,
					qat_req->comn_mid.dst_length,
					QAT_SYM_SGL_MAX_NUMBER);

			if (unlikely(ret)) {
				QAT_DP_LOG(ERR,
					"QAT PMD can't fill sgl array");
				return ret;
			}

			qat_req->comn_mid.src_data_addr =
					cookie->qat_sgl_src_phys_addr;
			qat_req->comn_mid.dest_data_addr =
					cookie->qat_sgl_dst_phys_addr;
		}
	} else {
		qat_req->comn_mid.src_data_addr = src_buf_start;
		qat_req->comn_mid.dest_data_addr = dst_buf_start;
		/* handle case of auth-gen-then-cipher with digest encrypted */
		if (wireless_auth && in_place &&
				(op->sym->auth.digest.phys_addr ==
					src_buf_start + auth_ofs + auth_len) &&
				(auth_ofs + auth_len + ctx->digest_length <=
					cipher_ofs + cipher_len)) {
			struct icp_qat_fw_comn_req_hdr *header =
					&qat_req->comn_hdr;
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
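			/*
			 * Rationale, as the comment above hints: when the
			 * generated digest sits right after the auth region
			 * and inside the cipher region, the firmware must
			 * be told the digest lives in the buffer so it is
			 * produced first and then encrypted in place,
			 * rather than written out after encryption.
			 */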
		}
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
			sizeof(struct icp_qat_fw_la_bulk_req));
	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
			rte_pktmbuf_data_len(op->sym->m_src));
	if (do_cipher) {
		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
						uint8_t *,
						ctx->cipher_iv.offset);
		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
				ctx->cipher_iv.length);
	}

	if (do_auth) {
		if (ctx->auth_iv.length) {
			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
							uint8_t *,
							ctx->auth_iv.offset);
			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
						ctx->auth_iv.length);
		}
		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
				ctx->digest_length);
	}

	if (do_aead) {
		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
				ctx->digest_length);
		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
				ctx->aad_len);
	}
#endif
	return 0;
}