/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016-2018 NXP
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>

#include <rte_cryptodev_pmd.h>
#include <rte_common.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>

#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"
typedef uint64_t dma_addr_t;

/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/pdcp.h>
#include <hw/desc/algo.h>
/* Minimum job descriptor consists of a one-word job descriptor HEADER and
 * a pointer to the shared descriptor.
 */
#define MIN_JOB_DESC_SIZE	(CAAM_CMD_SZ + CAAM_PTR_SZ)
#define FSL_VENDOR_ID		0x1957
#define FSL_DEVICE_ID		0x410
#define FSL_SUBSYSTEM_SEC	1
#define FSL_MC_DPSECI_DEVID	3

/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
#define FLE_POOL_NUM_BUFS	32000
#define FLE_POOL_BUF_SIZE	256
#define FLE_POOL_CACHE_SIZE	512
#define FLE_SG_MEM_SIZE		2048
#define SEC_FLC_DHR_OUTBOUND	-114
#define SEC_FLC_DHR_INBOUND	0

enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;

static uint8_t cryptodev_driver_id;

int dpaa2_logtype_sec;
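
/*
 * Layout note (editor's sketch, inferred from the build_*_fd() helpers below
 * and from sec_fd_to_mbuf(); not an authoritative hardware description):
 * each crypto op is described to SEC by a compound frame descriptor (FD)
 * pointing at a small frame-list-entry (FLE) array taken from fle_pool (or
 * rte_malloc() for the scatter-gather paths).  The FLE just before the one
 * the FD points to stores the rte_crypto_op pointer and session context, so
 * the dequeue path can recover the op by stepping back one FLE:
 *
 *	fle[0]	op pointer + ctxt (bookkeeping only)
 *	fle[1]	output FLE  <-- DPAA2_SET_FD_ADDR(fd, ...)
 *	fle[2]	input FLE (FIN bit set)
 */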
build_proto_compound_fd(dpaa2_sec_session *sess,
        struct rte_crypto_op *op,
        struct qbman_fd *fd, uint16_t bpid)

    struct rte_crypto_sym_op *sym_op = op->sym;
    struct ctxt_priv *priv = sess->ctxt;
    struct qbman_fle *fle, *ip_fle, *op_fle;
    struct sec_flow_context *flc;
    struct rte_mbuf *src_mbuf = sym_op->m_src;
    struct rte_mbuf *dst_mbuf = sym_op->m_dst;

    /* Save the shared descriptor */
    flc = &priv->flc_desc[0].flc;

    /* we are using the first FLE entry to store Mbuf */
    retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
        DPAA2_SEC_ERR("Memory alloc failed");

    memset(fle, 0, FLE_POOL_BUF_SIZE);
    DPAA2_SET_FLE_ADDR(fle, (size_t)op);
    DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

    if (likely(bpid < MAX_BPID)) {
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FLE_BPID(op_fle, bpid);
        DPAA2_SET_FLE_BPID(ip_fle, bpid);

        DPAA2_SET_FD_IVP(fd);
        DPAA2_SET_FLE_IVP(op_fle);
        DPAA2_SET_FLE_IVP(ip_fle);

    /* Configure FD as a FRAME LIST */
    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

    /* Configure Output FLE with dst mbuf data */
    DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
    DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
    DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);

    /* Configure Input FLE with src mbuf data */
    DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
    DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
    DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);

    DPAA2_SET_FD_LEN(fd, ip_fle->length);
    DPAA2_SET_FLE_FIN(ip_fle);

#ifdef ENABLE_HFN_OVERRIDE
    if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
        /* enable HFN override */
        DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd);
        DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd);
        DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd);
build_proto_fd(dpaa2_sec_session *sess,
        struct rte_crypto_op *op,
        struct qbman_fd *fd, uint16_t bpid)

    struct rte_crypto_sym_op *sym_op = op->sym;
        return build_proto_compound_fd(sess, op, fd, bpid);

    struct ctxt_priv *priv = sess->ctxt;
    struct sec_flow_context *flc;
    struct rte_mbuf *mbuf = sym_op->m_src;

    if (likely(bpid < MAX_BPID))
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_IVP(fd);

    /* Save the shared descriptor */
    flc = &priv->flc_desc[0].flc;

    DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
    DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
    DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

    /* save physical address of mbuf */
    op->sym->aead.digest.phys_addr = mbuf->buf_iova;
    mbuf->buf_iova = (size_t)op;
build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
        struct rte_crypto_op *op,
        struct qbman_fd *fd, __rte_unused uint16_t bpid)

    struct rte_crypto_sym_op *sym_op = op->sym;
    struct ctxt_priv *priv = sess->ctxt;
    struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
    struct sec_flow_context *flc;
    uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
    int icv_len = sess->digest_length;
    struct rte_mbuf *mbuf;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

    PMD_INIT_FUNC_TRACE();

        mbuf = sym_op->m_dst;
        mbuf = sym_op->m_src;

    /* first FLE entry used to store mbuf and session ctxt */
    fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
            RTE_CACHE_LINE_SIZE);
    if (unlikely(!fle)) {
        DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");

    memset(fle, 0, FLE_SG_MEM_SIZE);
    DPAA2_SET_FLE_ADDR(fle, (size_t)op);
    DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);

    /* Save the shared descriptor */
    flc = &priv->flc_desc[0].flc;

    /* Configure FD as a FRAME LIST */
    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

    DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
               "iv-len=%d data_off: 0x%x\n",
               sym_op->aead.data.offset,
               sym_op->aead.data.length,
               sym_op->m_src->data_off);

    /* Configure Output FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_SG_EXT(op_fle);
    DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

        DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

    op_fle->length = (sess->dir == DIR_ENC) ?
            (sym_op->aead.data.length + icv_len + auth_only_len) :
            sym_op->aead.data.length + auth_only_len;

    /* Configure Output SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
    DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off +
            RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
    sge->length = mbuf->data_len - sym_op->aead.data.offset + auth_only_len;

        DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
        DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
        sge->length = mbuf->data_len;

    sge->length -= icv_len;

    if (sess->dir == DIR_ENC) {
        DPAA2_SET_FLE_ADDR(sge,
                DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
        sge->length = icv_len;

    DPAA2_SET_FLE_FIN(sge);

    mbuf = sym_op->m_src;

    /* Configure Input FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
    DPAA2_SET_FLE_SG_EXT(ip_fle);
    DPAA2_SET_FLE_FIN(ip_fle);
    ip_fle->length = (sess->dir == DIR_ENC) ?
        (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
        (sym_op->aead.data.length + sess->iv.length + auth_only_len +

    /* Configure Input SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
    sge->length = sess->iv.length;

    DPAA2_SET_FLE_ADDR(sge,
            DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
    sge->length = auth_only_len;

    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
    sge->length = mbuf->data_len - sym_op->aead.data.offset;

        DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
        DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
        sge->length = mbuf->data_len;

    if (sess->dir == DIR_DEC) {
        old_icv = (uint8_t *)(sge + 1);
        memcpy(old_icv, sym_op->aead.digest.data, icv_len);
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
        sge->length = icv_len;

    DPAA2_SET_FLE_FIN(sge);

        DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
        DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);

    DPAA2_SET_FD_LEN(fd, ip_fle->length);
build_authenc_gcm_fd(dpaa2_sec_session *sess,
        struct rte_crypto_op *op,
        struct qbman_fd *fd, uint16_t bpid)

    struct rte_crypto_sym_op *sym_op = op->sym;
    struct ctxt_priv *priv = sess->ctxt;
    struct qbman_fle *fle, *sge;
    struct sec_flow_context *flc;
    uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
    int icv_len = sess->digest_length, retval;
    struct rte_mbuf *dst;
    uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

    PMD_INIT_FUNC_TRACE();

    /* TODO we are using the first FLE entry to store Mbuf and session ctxt.
     * Currently we do not know which FLE has the mbuf stored.
     * So while retrieving we can go back 1 FLE from the FD -ADDR
     * to get the MBUF Addr from the previous FLE.
     * We can have a better approach to use the inline Mbuf.
     */
    retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
        DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");

    memset(fle, 0, FLE_POOL_BUF_SIZE);
    DPAA2_SET_FLE_ADDR(fle, (size_t)op);
    DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

    if (likely(bpid < MAX_BPID)) {
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FLE_BPID(fle, bpid);
        DPAA2_SET_FLE_BPID(fle + 1, bpid);
        DPAA2_SET_FLE_BPID(sge, bpid);
        DPAA2_SET_FLE_BPID(sge + 1, bpid);
        DPAA2_SET_FLE_BPID(sge + 2, bpid);
        DPAA2_SET_FLE_BPID(sge + 3, bpid);

        DPAA2_SET_FD_IVP(fd);
        DPAA2_SET_FLE_IVP(fle);
        DPAA2_SET_FLE_IVP((fle + 1));
        DPAA2_SET_FLE_IVP(sge);
        DPAA2_SET_FLE_IVP((sge + 1));
        DPAA2_SET_FLE_IVP((sge + 2));
        DPAA2_SET_FLE_IVP((sge + 3));

    /* Save the shared descriptor */
    flc = &priv->flc_desc[0].flc;
    /* Configure FD as a FRAME LIST */
    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

    DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
               "iv-len=%d data_off: 0x%x\n",
               sym_op->aead.data.offset,
               sym_op->aead.data.length,
               sym_op->m_src->data_off);

    /* Configure Output FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
        DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
    fle->length = (sess->dir == DIR_ENC) ?
            (sym_op->aead.data.length + icv_len + auth_only_len) :
            sym_op->aead.data.length + auth_only_len;

    DPAA2_SET_FLE_SG_EXT(fle);

    /* Configure Output SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
    DPAA2_SET_FLE_OFFSET(sge, dst->data_off +
            RTE_ALIGN_CEIL(auth_only_len, 16) - auth_only_len);
    sge->length = sym_op->aead.data.length + auth_only_len;

    if (sess->dir == DIR_ENC) {
        DPAA2_SET_FLE_ADDR(sge,
                DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
        sge->length = sess->digest_length;
        DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
                    sess->iv.length + auth_only_len));

    DPAA2_SET_FLE_FIN(sge);

    /* Configure Input FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
    DPAA2_SET_FLE_SG_EXT(fle);
    DPAA2_SET_FLE_FIN(fle);
    fle->length = (sess->dir == DIR_ENC) ?
        (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
        (sym_op->aead.data.length + sess->iv.length + auth_only_len +
         sess->digest_length);

    /* Configure Input SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
    sge->length = sess->iv.length;

    DPAA2_SET_FLE_ADDR(sge,
            DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
    sge->length = auth_only_len;
    DPAA2_SET_FLE_BPID(sge, bpid);

    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
                  sym_op->m_src->data_off);
    sge->length = sym_op->aead.data.length;
    if (sess->dir == DIR_DEC) {
        old_icv = (uint8_t *)(sge + 1);
        memcpy(old_icv, sym_op->aead.digest.data,
               sess->digest_length);
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
        sge->length = sess->digest_length;
        DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
                 sess->digest_length +

    DPAA2_SET_FLE_FIN(sge);
        DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
        DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
build_authenc_sg_fd(dpaa2_sec_session *sess,
        struct rte_crypto_op *op,
        struct qbman_fd *fd, __rte_unused uint16_t bpid)

    struct rte_crypto_sym_op *sym_op = op->sym;
    struct ctxt_priv *priv = sess->ctxt;
    struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
    struct sec_flow_context *flc;
    uint32_t auth_only_len = sym_op->auth.data.length -
                sym_op->cipher.data.length;
    int icv_len = sess->digest_length;
    struct rte_mbuf *mbuf;
    uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

    PMD_INIT_FUNC_TRACE();

        mbuf = sym_op->m_dst;
        mbuf = sym_op->m_src;

    /* first FLE entry used to store mbuf and session ctxt */
    fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
            RTE_CACHE_LINE_SIZE);
    if (unlikely(!fle)) {
        DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");

    memset(fle, 0, FLE_SG_MEM_SIZE);
    DPAA2_SET_FLE_ADDR(fle, (size_t)op);
    DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

    /* Save the shared descriptor */
    flc = &priv->flc_desc[0].flc;

    /* Configure FD as a FRAME LIST */
    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

           "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
           "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
           sym_op->auth.data.offset,
           sym_op->auth.data.length,
           sym_op->cipher.data.offset,
           sym_op->cipher.data.length,
           sym_op->m_src->data_off);

    /* Configure Output FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_SG_EXT(op_fle);
    DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));

        DPAA2_SET_FLE_INTERNAL_JD(op_fle, auth_only_len);

    op_fle->length = (sess->dir == DIR_ENC) ?
            (sym_op->cipher.data.length + icv_len) :
            sym_op->cipher.data.length;

    /* Configure Output SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
    DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off + sym_op->auth.data.offset);
    sge->length = mbuf->data_len - sym_op->auth.data.offset;

        DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
        DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
        sge->length = mbuf->data_len;

    sge->length -= icv_len;

    if (sess->dir == DIR_ENC) {
        DPAA2_SET_FLE_ADDR(sge,
                DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
        sge->length = icv_len;

    DPAA2_SET_FLE_FIN(sge);

    mbuf = sym_op->m_src;

    /* Configure Input FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
    DPAA2_SET_FLE_SG_EXT(ip_fle);
    DPAA2_SET_FLE_FIN(ip_fle);
    ip_fle->length = (sess->dir == DIR_ENC) ?
            (sym_op->auth.data.length + sess->iv.length) :
            (sym_op->auth.data.length + sess->iv.length +

    /* Configure Input SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
    sge->length = sess->iv.length;

    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
    sge->length = mbuf->data_len - sym_op->auth.data.offset;

        DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
        DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
        sge->length = mbuf->data_len;

    sge->length -= icv_len;

    if (sess->dir == DIR_DEC) {
        old_icv = (uint8_t *)(sge + 1);
        memcpy(old_icv, sym_op->auth.digest.data,
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
        sge->length = icv_len;

    DPAA2_SET_FLE_FIN(sge);

        DPAA2_SET_FLE_INTERNAL_JD(ip_fle, auth_only_len);
        DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);

    DPAA2_SET_FD_LEN(fd, ip_fle->length);
build_authenc_fd(dpaa2_sec_session *sess,
        struct rte_crypto_op *op,
        struct qbman_fd *fd, uint16_t bpid)

    struct rte_crypto_sym_op *sym_op = op->sym;
    struct ctxt_priv *priv = sess->ctxt;
    struct qbman_fle *fle, *sge;
    struct sec_flow_context *flc;
    uint32_t auth_only_len = sym_op->auth.data.length -
                sym_op->cipher.data.length;
    int icv_len = sess->digest_length, retval;
    uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
    struct rte_mbuf *dst;

    PMD_INIT_FUNC_TRACE();

    /* we are using the first FLE entry to store Mbuf.
     * Currently we do not know which FLE has the mbuf stored.
     * So while retrieving we can go back 1 FLE from the FD -ADDR
     * to get the MBUF Addr from the previous FLE.
     * We can have a better approach to use the inline Mbuf.
     */
    retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
        DPAA2_SEC_ERR("Memory alloc failed for SGE");

    memset(fle, 0, FLE_POOL_BUF_SIZE);
    DPAA2_SET_FLE_ADDR(fle, (size_t)op);
    DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

    if (likely(bpid < MAX_BPID)) {
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FLE_BPID(fle, bpid);
        DPAA2_SET_FLE_BPID(fle + 1, bpid);
        DPAA2_SET_FLE_BPID(sge, bpid);
        DPAA2_SET_FLE_BPID(sge + 1, bpid);
        DPAA2_SET_FLE_BPID(sge + 2, bpid);
        DPAA2_SET_FLE_BPID(sge + 3, bpid);

        DPAA2_SET_FD_IVP(fd);
        DPAA2_SET_FLE_IVP(fle);
        DPAA2_SET_FLE_IVP((fle + 1));
        DPAA2_SET_FLE_IVP(sge);
        DPAA2_SET_FLE_IVP((sge + 1));
        DPAA2_SET_FLE_IVP((sge + 2));
        DPAA2_SET_FLE_IVP((sge + 3));

    /* Save the shared descriptor */
    flc = &priv->flc_desc[0].flc;
    /* Configure FD as a FRAME LIST */
    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

           "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
           "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
           sym_op->auth.data.offset,
           sym_op->auth.data.length,
           sym_op->cipher.data.offset,
           sym_op->cipher.data.length,
           sym_op->m_src->data_off);

    /* Configure Output FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
        DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
    fle->length = (sess->dir == DIR_ENC) ?
            (sym_op->cipher.data.length + icv_len) :
            sym_op->cipher.data.length;

    DPAA2_SET_FLE_SG_EXT(fle);

    /* Configure Output SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
    sge->length = sym_op->cipher.data.length;

    if (sess->dir == DIR_ENC) {
        DPAA2_SET_FLE_ADDR(sge,
                DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
        sge->length = sess->digest_length;
        DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +

    DPAA2_SET_FLE_FIN(sge);

    /* Configure Input FLE with Scatter/Gather Entry */
    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
    DPAA2_SET_FLE_SG_EXT(fle);
    DPAA2_SET_FLE_FIN(fle);
    fle->length = (sess->dir == DIR_ENC) ?
            (sym_op->auth.data.length + sess->iv.length) :
            (sym_op->auth.data.length + sess->iv.length +
             sess->digest_length);

    /* Configure Input SGE for Encap/Decap */
    DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
    sge->length = sess->iv.length;

    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
                  sym_op->m_src->data_off);
    sge->length = sym_op->auth.data.length;
    if (sess->dir == DIR_DEC) {
        old_icv = (uint8_t *)(sge + 1);
        memcpy(old_icv, sym_op->auth.digest.data,
               sess->digest_length);
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
        sge->length = sess->digest_length;
        DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
                 sess->digest_length +

    DPAA2_SET_FLE_FIN(sge);
        DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
        DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
static inline int build_auth_sg_fd(
        dpaa2_sec_session *sess,
        struct rte_crypto_op *op,
        __rte_unused uint16_t bpid)

    struct rte_crypto_sym_op *sym_op = op->sym;
    struct qbman_fle *fle, *sge, *ip_fle, *op_fle;
    struct sec_flow_context *flc;
    struct ctxt_priv *priv = sess->ctxt;
    struct rte_mbuf *mbuf;

    PMD_INIT_FUNC_TRACE();

    mbuf = sym_op->m_src;
    fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
            RTE_CACHE_LINE_SIZE);
    if (unlikely(!fle)) {
        DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");

    memset(fle, 0, FLE_SG_MEM_SIZE);
    /* first FLE entry used to store mbuf and session ctxt */
    DPAA2_SET_FLE_ADDR(fle, (size_t)op);
    DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

    flc = &priv->flc_desc[DESC_INITFINAL].flc;

    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
    DPAA2_SET_FD_COMPOUND_FMT(fd);

    DPAA2_SET_FLE_ADDR(op_fle,
            DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
    op_fle->length = sess->digest_length;

    DPAA2_SET_FLE_SG_EXT(ip_fle);
    DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));

    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset + mbuf->data_off);
    sge->length = mbuf->data_len - sym_op->auth.data.offset;

        DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
        DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
        sge->length = mbuf->data_len;

    if (sess->dir == DIR_ENC) {
        /* Digest calculation case */
        sge->length -= sess->digest_length;
        ip_fle->length = sym_op->auth.data.length;

        /* Digest verification case */
        old_digest = (uint8_t *)(sge + 1);
        rte_memcpy(old_digest, sym_op->auth.digest.data,
               sess->digest_length);
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
        sge->length = sess->digest_length;
        ip_fle->length = sym_op->auth.data.length +

    DPAA2_SET_FLE_FIN(sge);
    DPAA2_SET_FLE_FIN(ip_fle);
    DPAA2_SET_FD_LEN(fd, ip_fle->length);
build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
          struct qbman_fd *fd, uint16_t bpid)

    struct rte_crypto_sym_op *sym_op = op->sym;
    struct qbman_fle *fle, *sge;
    struct sec_flow_context *flc;
    struct ctxt_priv *priv = sess->ctxt;

    PMD_INIT_FUNC_TRACE();

    retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
        DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");

    memset(fle, 0, FLE_POOL_BUF_SIZE);
    /* TODO we are using the first FLE entry to store Mbuf.
     * Currently we do not know which FLE has the mbuf stored.
     * So while retrieving we can go back 1 FLE from the FD -ADDR
     * to get the MBUF Addr from the previous FLE.
     * We can have a better approach to use the inline Mbuf.
     */
    DPAA2_SET_FLE_ADDR(fle, (size_t)op);
    DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

    if (likely(bpid < MAX_BPID)) {
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FLE_BPID(fle, bpid);
        DPAA2_SET_FLE_BPID(fle + 1, bpid);

        DPAA2_SET_FD_IVP(fd);
        DPAA2_SET_FLE_IVP(fle);
        DPAA2_SET_FLE_IVP((fle + 1));

    flc = &priv->flc_desc[DESC_INITFINAL].flc;
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
    fle->length = sess->digest_length;

    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
    DPAA2_SET_FD_COMPOUND_FMT(fd);

    if (sess->dir == DIR_ENC) {
        DPAA2_SET_FLE_ADDR(fle,
                DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
        DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
                     sym_op->m_src->data_off);
        DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
        fle->length = sym_op->auth.data.length;

        DPAA2_SET_FLE_SG_EXT(fle);
        DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));

        if (likely(bpid < MAX_BPID)) {
            DPAA2_SET_FLE_BPID(sge, bpid);
            DPAA2_SET_FLE_BPID(sge + 1, bpid);

            DPAA2_SET_FLE_IVP(sge);
            DPAA2_SET_FLE_IVP((sge + 1));

        DPAA2_SET_FLE_ADDR(sge,
                DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
        DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
                     sym_op->m_src->data_off);

        DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
                 sess->digest_length);
        sge->length = sym_op->auth.data.length;

        old_digest = (uint8_t *)(sge + 1);
        rte_memcpy(old_digest, sym_op->auth.digest.data,
               sess->digest_length);
        DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
        sge->length = sess->digest_length;
        fle->length = sym_op->auth.data.length +

        DPAA2_SET_FLE_FIN(sge);

    DPAA2_SET_FLE_FIN(fle);
build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
           struct qbman_fd *fd, __rte_unused uint16_t bpid)

    struct rte_crypto_sym_op *sym_op = op->sym;
    struct qbman_fle *ip_fle, *op_fle, *sge, *fle;
    struct sec_flow_context *flc;
    struct ctxt_priv *priv = sess->ctxt;
    struct rte_mbuf *mbuf;
    uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,

    PMD_INIT_FUNC_TRACE();

        mbuf = sym_op->m_dst;
        mbuf = sym_op->m_src;

    fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
            RTE_CACHE_LINE_SIZE);
        DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");

    memset(fle, 0, FLE_SG_MEM_SIZE);
    /* first FLE entry used to store mbuf and session ctxt */
    DPAA2_SET_FLE_ADDR(fle, (size_t)op);
    DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

    flc = &priv->flc_desc[0].flc;

           "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
           sym_op->cipher.data.offset,
           sym_op->cipher.data.length,
           sym_op->m_src->data_off);

    DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
    op_fle->length = sym_op->cipher.data.length;
    DPAA2_SET_FLE_SG_EXT(op_fle);

    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset + mbuf->data_off);
    sge->length = mbuf->data_len - sym_op->cipher.data.offset;

        DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
        DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
        sge->length = mbuf->data_len;

    DPAA2_SET_FLE_FIN(sge);

           "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
           flc, fle, fle->addr_hi, fle->addr_lo,

    mbuf = sym_op->m_src;

    DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_VADDR_TO_IOVA(sge));
    ip_fle->length = sess->iv.length + sym_op->cipher.data.length;
    DPAA2_SET_FLE_SG_EXT(ip_fle);

    DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
    DPAA2_SET_FLE_OFFSET(sge, 0);
    sge->length = sess->iv.length;

    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
    sge->length = mbuf->data_len - sym_op->cipher.data.offset;

        DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
        DPAA2_SET_FLE_OFFSET(sge, mbuf->data_off);
        sge->length = mbuf->data_len;

    DPAA2_SET_FLE_FIN(sge);
    DPAA2_SET_FLE_FIN(ip_fle);

    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
    DPAA2_SET_FD_LEN(fd, ip_fle->length);
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

           "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
           " off =%d, len =%d\n",
           DPAA2_GET_FD_ADDR(fd),
           DPAA2_GET_FD_BPID(fd),
           rte_dpaa2_bpid_info[bpid].meta_data_size,
           DPAA2_GET_FD_OFFSET(fd),
           DPAA2_GET_FD_LEN(fd));
build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
        struct qbman_fd *fd, uint16_t bpid)

    struct rte_crypto_sym_op *sym_op = op->sym;
    struct qbman_fle *fle, *sge;
    struct sec_flow_context *flc;
    struct ctxt_priv *priv = sess->ctxt;
    uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
    struct rte_mbuf *dst;

    PMD_INIT_FUNC_TRACE();

        dst = sym_op->m_dst;
        dst = sym_op->m_src;

    retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
        DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");

    memset(fle, 0, FLE_POOL_BUF_SIZE);
    /* TODO we are using the first FLE entry to store Mbuf.
     * Currently we do not know which FLE has the mbuf stored.
     * So while retrieving we can go back 1 FLE from the FD -ADDR
     * to get the MBUF Addr from the previous FLE.
     * We can have a better approach to use the inline Mbuf.
     */
    DPAA2_SET_FLE_ADDR(fle, (size_t)op);
    DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);

    if (likely(bpid < MAX_BPID)) {
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FLE_BPID(fle, bpid);
        DPAA2_SET_FLE_BPID(fle + 1, bpid);
        DPAA2_SET_FLE_BPID(sge, bpid);
        DPAA2_SET_FLE_BPID(sge + 1, bpid);

        DPAA2_SET_FD_IVP(fd);
        DPAA2_SET_FLE_IVP(fle);
        DPAA2_SET_FLE_IVP((fle + 1));
        DPAA2_SET_FLE_IVP(sge);
        DPAA2_SET_FLE_IVP((sge + 1));

    flc = &priv->flc_desc[0].flc;
    DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
    DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
    DPAA2_SET_FD_COMPOUND_FMT(fd);
    DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));

           "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
           " data_off: 0x%x\n",
           sym_op->cipher.data.offset,
           sym_op->cipher.data.length,
           sym_op->m_src->data_off);

    DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
    DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
    fle->length = sym_op->cipher.data.length + sess->iv.length;

           "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
           flc, fle, fle->addr_hi, fle->addr_lo,

    DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
    fle->length = sym_op->cipher.data.length + sess->iv.length;

    DPAA2_SET_FLE_SG_EXT(fle);

    DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
    sge->length = sess->iv.length;

    DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
    DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
                  sym_op->m_src->data_off);

    sge->length = sym_op->cipher.data.length;
    DPAA2_SET_FLE_FIN(sge);
    DPAA2_SET_FLE_FIN(fle);

           "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
           " off =%d, len =%d\n",
           DPAA2_GET_FD_ADDR(fd),
           DPAA2_GET_FD_BPID(fd),
           rte_dpaa2_bpid_info[bpid].meta_data_size,
           DPAA2_GET_FD_OFFSET(fd),
           DPAA2_GET_FD_LEN(fd));
build_sec_fd(struct rte_crypto_op *op,
         struct qbman_fd *fd, uint16_t bpid)

    dpaa2_sec_session *sess;

    PMD_INIT_FUNC_TRACE();

    if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
        sess = (dpaa2_sec_session *)get_sym_session_private_data(
                op->sym->session, cryptodev_driver_id);
    else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
        sess = (dpaa2_sec_session *)get_sec_session_private_data(
                op->sym->sec_session);

    /* Segmented buffer */
    if (unlikely(!rte_pktmbuf_is_contiguous(op->sym->m_src))) {
        switch (sess->ctxt_type) {
        case DPAA2_SEC_CIPHER:
            ret = build_cipher_sg_fd(sess, op, fd, bpid);
        case DPAA2_SEC_AUTH:
            ret = build_auth_sg_fd(sess, op, fd, bpid);
        case DPAA2_SEC_AEAD:
            ret = build_authenc_gcm_sg_fd(sess, op, fd, bpid);
        case DPAA2_SEC_CIPHER_HASH:
            ret = build_authenc_sg_fd(sess, op, fd, bpid);
        case DPAA2_SEC_HASH_CIPHER:
            DPAA2_SEC_ERR("error: Unsupported session");

        switch (sess->ctxt_type) {
        case DPAA2_SEC_CIPHER:
            ret = build_cipher_fd(sess, op, fd, bpid);
        case DPAA2_SEC_AUTH:
            ret = build_auth_fd(sess, op, fd, bpid);
        case DPAA2_SEC_AEAD:
            ret = build_authenc_gcm_fd(sess, op, fd, bpid);
        case DPAA2_SEC_CIPHER_HASH:
            ret = build_authenc_fd(sess, op, fd, bpid);
        case DPAA2_SEC_IPSEC:
            ret = build_proto_fd(sess, op, fd, bpid);
        case DPAA2_SEC_PDCP:
            ret = build_proto_compound_fd(sess, op, fd, bpid);
        case DPAA2_SEC_HASH_CIPHER:
            DPAA2_SEC_ERR("error: Unsupported session");
dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,

    /* Function to transmit the frames to given device and VQ */
    struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
    uint32_t frames_to_send;
    struct qbman_eq_desc eqdesc;
    struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
    struct qbman_swp *swp;
    uint16_t num_tx = 0;
    uint32_t flags[MAX_TX_RING_SLOTS] = {0};
    /* todo - need to support multiple buffer pools */
    struct rte_mempool *mb_pool;

    if (unlikely(nb_ops == 0))

    if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
        DPAA2_SEC_ERR("sessionless crypto op not supported");

    /* Prepare enqueue descriptor */
    qbman_eq_desc_clear(&eqdesc);
    qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
    qbman_eq_desc_set_response(&eqdesc, 0, 0);
    qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);

    if (!DPAA2_PER_LCORE_DPIO) {
        ret = dpaa2_affine_qbman_swp();
            DPAA2_SEC_ERR("Failure in affining portal");

    swp = DPAA2_PER_LCORE_PORTAL;

        frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
            dpaa2_eqcr_size : nb_ops;

        for (loop = 0; loop < frames_to_send; loop++) {
            if ((*ops)->sym->m_src->seqn) {
                uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;

                flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
                DPAA2_PER_LCORE_DQRR_SIZE--;
                DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
                (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;

            /* Clear the unused FD fields before sending */
            memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
            mb_pool = (*ops)->sym->m_src->pool;
            bpid = mempool_to_bpid(mb_pool);
            ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
                DPAA2_SEC_ERR("error: Improper packet contents"
                          " for crypto operation");

        while (loop < frames_to_send) {
            loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
                            frames_to_send - loop);

        num_tx += frames_to_send;
        nb_ops -= frames_to_send;

    dpaa2_qp->tx_vq.tx_pkts += num_tx;
    dpaa2_qp->tx_vq.err_pkts += nb_ops;
static inline struct rte_crypto_op *
sec_simple_fd_to_mbuf(const struct qbman_fd *fd)

    struct rte_crypto_op *op;
    uint16_t len = DPAA2_GET_FD_LEN(fd);
    dpaa2_sec_session *sess_priv;

    struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
        DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

    diff = len - mbuf->pkt_len;
    mbuf->pkt_len += diff;
    mbuf->data_len += diff;
    op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
    mbuf->buf_iova = op->sym->aead.digest.phys_addr;
    op->sym->aead.digest.phys_addr = 0L;

    sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
            op->sym->sec_session);
    if (sess_priv->dir == DIR_ENC)
        mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
        mbuf->data_off += SEC_FLC_DHR_INBOUND;
static inline struct rte_crypto_op *
sec_fd_to_mbuf(const struct qbman_fd *fd)

    struct qbman_fle *fle;
    struct rte_crypto_op *op;
    struct ctxt_priv *priv;
    struct rte_mbuf *dst, *src;

    if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
        return sec_simple_fd_to_mbuf(fd);

    fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

    DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
               fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);

    /* we are using the first FLE entry to store Mbuf.
     * Currently we do not know which FLE has the mbuf stored.
     * So while retrieving we can go back 1 FLE from the FD -ADDR
     * to get the MBUF Addr from the previous FLE.
     * We can have a better approach to use the inline Mbuf.
     */

    if (unlikely(DPAA2_GET_FD_IVP(fd))) {
        /* TODO complete it. */
        DPAA2_SEC_ERR("error: non inline buffer");

    op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));

    src = op->sym->m_src;

    if (op->sym->m_dst) {
        dst = op->sym->m_dst;

    if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
        dpaa2_sec_session *sess = (dpaa2_sec_session *)
            get_sec_session_private_data(op->sym->sec_session);
        if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
            uint16_t len = DPAA2_GET_FD_LEN(fd);

            dst->data_len = len;

    DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
        " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
        DPAA2_GET_FD_ADDR(fd),
        DPAA2_GET_FD_BPID(fd),
        rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
        DPAA2_GET_FD_OFFSET(fd),
        DPAA2_GET_FD_LEN(fd));

    /* free the fle memory */
    if (likely(rte_pktmbuf_is_contiguous(src))) {
        priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
        rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
        rte_free((void *)(fle - 1));
dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,

    /* Function is responsible to receive frames for a given device and VQ */
    struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
    struct qbman_result *dq_storage;
    uint32_t fqid = dpaa2_qp->rx_vq.fqid;
    int ret, num_rx = 0;
    uint8_t is_last = 0, status;
    struct qbman_swp *swp;
    const struct qbman_fd *fd;
    struct qbman_pull_desc pulldesc;

    if (!DPAA2_PER_LCORE_DPIO) {
        ret = dpaa2_affine_qbman_swp();
            DPAA2_SEC_ERR("Failure in affining portal");

    swp = DPAA2_PER_LCORE_PORTAL;
    dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];

    qbman_pull_desc_clear(&pulldesc);
    qbman_pull_desc_set_numframes(&pulldesc,
                      (nb_ops > dpaa2_dqrr_size) ?
                      dpaa2_dqrr_size : nb_ops);
    qbman_pull_desc_set_fq(&pulldesc, fqid);
    qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                    (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),

    /* Issue a volatile dequeue command. */
        if (qbman_swp_pull(swp, &pulldesc)) {
                "SEC VDQ command is not issued : QBMAN busy");
            /* Portal was busy, try again */

    /* Receive the packets till Last Dequeue entry is found with
     * respect to the above issued PULL command.
     */
        /* Check if the previous issued command is completed.
         * Also seems like the SWP is shared between the Ethernet Driver
         * and the SEC driver.
         */
        while (!qbman_check_command_complete(dq_storage))

        /* Loop until the dq_storage is updated with
         * new token by QBMAN
         */
        while (!qbman_check_new_result(dq_storage))

        /* Check whether Last Pull command is Expired and
         * setting Condition for Loop termination
         */
        if (qbman_result_DQ_is_pull_complete(dq_storage)) {

            /* Check for valid frame. */
            status = (uint8_t)qbman_result_DQ_flags(dq_storage);
                (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
                DPAA2_SEC_DP_DEBUG("No frame is delivered\n");

        fd = qbman_result_DQ_fd(dq_storage);
        ops[num_rx] = sec_fd_to_mbuf(fd);

        if (unlikely(fd->simple.frc)) {
            /* TODO Parse SEC errors */
            DPAA2_SEC_ERR("SEC returned Error - %x",
            ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
            ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

    } /* End of Packet Rx loop */

    dpaa2_qp->rx_vq.rx_pkts += num_rx;

    DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
    /* Return the total number of packets received to DPAA2 app */
1498 dpaa2_sec_queue_pair_release(struct rte_cryptodev
*dev
, uint16_t queue_pair_id
)
1500 struct dpaa2_sec_qp
*qp
=
1501 (struct dpaa2_sec_qp
*)dev
->data
->queue_pairs
[queue_pair_id
];
1503 PMD_INIT_FUNC_TRACE();
1505 if (qp
->rx_vq
.q_storage
) {
1506 dpaa2_free_dq_storage(qp
->rx_vq
.q_storage
);
1507 rte_free(qp
->rx_vq
.q_storage
);
1511 dev
->data
->queue_pairs
[queue_pair_id
] = NULL
;
/** Setup a queue pair */
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
        __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
        __rte_unused int socket_id)

    struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
    struct dpaa2_sec_qp *qp;
    struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
    struct dpseci_rx_queue_cfg cfg;

    PMD_INIT_FUNC_TRACE();

    /* If qp is already in use free ring memory and qp metadata. */
    if (dev->data->queue_pairs[qp_id] != NULL) {
        DPAA2_SEC_INFO("QP already setup");

    DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
            dev, qp_id, qp_conf);

    memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));

    qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
            RTE_CACHE_LINE_SIZE);
        DPAA2_SEC_ERR("malloc failed for rx/tx queues");

    qp->rx_vq.crypto_data = dev->data;
    qp->tx_vq.crypto_data = dev->data;
    qp->rx_vq.q_storage = rte_malloc("sec dq storage",
        sizeof(struct queue_storage_info_t),
        RTE_CACHE_LINE_SIZE);
    if (!qp->rx_vq.q_storage) {
        DPAA2_SEC_ERR("malloc failed for q_storage");

    memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));

    if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
        DPAA2_SEC_ERR("Unable to allocate dequeue storage");

    dev->data->queue_pairs[qp_id] = qp;

    cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
    cfg.user_ctx = (size_t)(&qp->rx_vq);
    retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
/** Return the number of allocated queue pairs */
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)

    PMD_INIT_FUNC_TRACE();

    return dev->data->nb_queue_pairs;
/** Returns the size of the dpaa2_sec session structure */
dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)

    PMD_INIT_FUNC_TRACE();

    return sizeof(dpaa2_sec_session);
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
              struct rte_crypto_sym_xform *xform,
              dpaa2_sec_session *session)

    struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
    struct alginfo cipherdata;
    struct ctxt_priv *priv;
    struct sec_flow_context *flc;

    PMD_INIT_FUNC_TRACE();

    /* For SEC CIPHER only one descriptor is required. */
    priv = (struct ctxt_priv *)rte_zmalloc(NULL,
            sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
            RTE_CACHE_LINE_SIZE);
        DPAA2_SEC_ERR("No Memory for priv CTXT");

    priv->fle_pool = dev_priv->fle_pool;

    flc = &priv->flc_desc[0].flc;

    session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
            RTE_CACHE_LINE_SIZE);
    if (session->cipher_key.data == NULL) {
        DPAA2_SEC_ERR("No Memory for cipher key");

    session->cipher_key.length = xform->cipher.key.length;

    memcpy(session->cipher_key.data, xform->cipher.key.data,
           xform->cipher.key.length);
    cipherdata.key = (size_t)session->cipher_key.data;
    cipherdata.keylen = session->cipher_key.length;
    cipherdata.key_enc_flags = 0;
    cipherdata.key_type = RTA_DATA_IMM;

    /* Set IV parameters */
    session->iv.offset = xform->cipher.iv.offset;
    session->iv.length = xform->cipher.iv.length;

    switch (xform->cipher.algo) {
    case RTE_CRYPTO_CIPHER_AES_CBC:
        cipherdata.algtype = OP_ALG_ALGSEL_AES;
        cipherdata.algmode = OP_ALG_AAI_CBC;
        session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
    case RTE_CRYPTO_CIPHER_3DES_CBC:
        cipherdata.algtype = OP_ALG_ALGSEL_3DES;
        cipherdata.algmode = OP_ALG_AAI_CBC;
        session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
    case RTE_CRYPTO_CIPHER_AES_CTR:
        cipherdata.algtype = OP_ALG_ALGSEL_AES;
        cipherdata.algmode = OP_ALG_AAI_CTR;
        session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
    case RTE_CRYPTO_CIPHER_3DES_CTR:
    case RTE_CRYPTO_CIPHER_AES_ECB:
    case RTE_CRYPTO_CIPHER_3DES_ECB:
    case RTE_CRYPTO_CIPHER_AES_XTS:
    case RTE_CRYPTO_CIPHER_AES_F8:
    case RTE_CRYPTO_CIPHER_ARC4:
    case RTE_CRYPTO_CIPHER_KASUMI_F8:
    case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
    case RTE_CRYPTO_CIPHER_ZUC_EEA3:
    case RTE_CRYPTO_CIPHER_NULL:
        DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
                  xform->cipher.algo);
        DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
                  xform->cipher.algo);

    session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?

    bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
                    &cipherdata, NULL, session->iv.length,
        DPAA2_SEC_ERR("Crypto: Descriptor build failed");

    flc->word1_sdl = (uint8_t)bufsize;
    session->ctxt = priv;

    for (i = 0; i < bufsize; i++)
        DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);

    rte_free(session->cipher_key.data);
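
/*
 * Illustrative only (editor's sketch, not part of the driver): a cipher-only
 * session reaches dpaa2_sec_cipher_init() through the generic cryptodev API.
 * An application would typically describe the transform roughly as follows
 * (field names are from struct rte_crypto_sym_xform; the key buffer and
 * IV_OFFSET shown are placeholders):
 *
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = NULL,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = aes_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */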
dpaa2_sec_auth_init(struct rte_cryptodev *dev,
            struct rte_crypto_sym_xform *xform,
            dpaa2_sec_session *session)

    struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
    struct alginfo authdata;
    struct ctxt_priv *priv;
    struct sec_flow_context *flc;

    PMD_INIT_FUNC_TRACE();

    /* For SEC AUTH three descriptors are required for various stages */
    priv = (struct ctxt_priv *)rte_zmalloc(NULL,
            sizeof(struct ctxt_priv) + 3 *
            sizeof(struct sec_flc_desc),
            RTE_CACHE_LINE_SIZE);
        DPAA2_SEC_ERR("No Memory for priv CTXT");

    priv->fle_pool = dev_priv->fle_pool;
    flc = &priv->flc_desc[DESC_INITFINAL].flc;

    session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
                         RTE_CACHE_LINE_SIZE);
    if (session->auth_key.data == NULL) {
        DPAA2_SEC_ERR("Unable to allocate memory for auth key");

    session->auth_key.length = xform->auth.key.length;

    memcpy(session->auth_key.data, xform->auth.key.data,
           xform->auth.key.length);
    authdata.key = (size_t)session->auth_key.data;
    authdata.keylen = session->auth_key.length;
    authdata.key_enc_flags = 0;
    authdata.key_type = RTA_DATA_IMM;

    session->digest_length = xform->auth.digest_length;

    switch (xform->auth.algo) {
    case RTE_CRYPTO_AUTH_SHA1_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA1;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
    case RTE_CRYPTO_AUTH_MD5_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_MD5;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
    case RTE_CRYPTO_AUTH_SHA256_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA256;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
    case RTE_CRYPTO_AUTH_SHA384_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA384;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
    case RTE_CRYPTO_AUTH_SHA512_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA512;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
    case RTE_CRYPTO_AUTH_SHA224_HMAC:
        authdata.algtype = OP_ALG_ALGSEL_SHA224;
        authdata.algmode = OP_ALG_AAI_HMAC;
        session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
    case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
    case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
    case RTE_CRYPTO_AUTH_NULL:
    case RTE_CRYPTO_AUTH_SHA1:
    case RTE_CRYPTO_AUTH_SHA256:
    case RTE_CRYPTO_AUTH_SHA512:
    case RTE_CRYPTO_AUTH_SHA224:
    case RTE_CRYPTO_AUTH_SHA384:
    case RTE_CRYPTO_AUTH_MD5:
    case RTE_CRYPTO_AUTH_AES_GMAC:
    case RTE_CRYPTO_AUTH_KASUMI_F9:
    case RTE_CRYPTO_AUTH_AES_CMAC:
    case RTE_CRYPTO_AUTH_AES_CBC_MAC:
    case RTE_CRYPTO_AUTH_ZUC_EIA3:
        DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
        DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",

    session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?

    bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
                   1, 0, SHR_NEVER, &authdata, !session->dir,
                   session->digest_length);
        DPAA2_SEC_ERR("Crypto: Invalid buffer length");

    flc->word1_sdl = (uint8_t)bufsize;
    session->ctxt = priv;
    for (i = 0; i < bufsize; i++)
        DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
                i, priv->flc_desc[DESC_INITFINAL].desc[i]);

    rte_free(session->auth_key.data);
dpaa2_sec_aead_init(struct rte_cryptodev *dev,
            struct rte_crypto_sym_xform *xform,
            dpaa2_sec_session *session)

    struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
    struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
    struct alginfo aeaddata;
    struct ctxt_priv *priv;
    struct sec_flow_context *flc;
    struct rte_crypto_aead_xform *aead_xform = &xform->aead;

    PMD_INIT_FUNC_TRACE();

    /* Set IV parameters */
    session->iv.offset = aead_xform->iv.offset;
    session->iv.length = aead_xform->iv.length;
    session->ctxt_type = DPAA2_SEC_AEAD;

    /* For SEC AEAD only one descriptor is required */
    priv = (struct ctxt_priv *)rte_zmalloc(NULL,
            sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
            RTE_CACHE_LINE_SIZE);
        DPAA2_SEC_ERR("No Memory for priv CTXT");

    priv->fle_pool = dev_priv->fle_pool;
    flc = &priv->flc_desc[0].flc;

    session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
                         RTE_CACHE_LINE_SIZE);
    if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
        DPAA2_SEC_ERR("No Memory for aead key");

    memcpy(session->aead_key.data, aead_xform->key.data,
           aead_xform->key.length);

    session->digest_length = aead_xform->digest_length;
    session->aead_key.length = aead_xform->key.length;
    ctxt->auth_only_len = aead_xform->aad_length;

    aeaddata.key = (size_t)session->aead_key.data;
    aeaddata.keylen = session->aead_key.length;
    aeaddata.key_enc_flags = 0;
    aeaddata.key_type = RTA_DATA_IMM;

    switch (aead_xform->algo) {
    case RTE_CRYPTO_AEAD_AES_GCM:
        aeaddata.algtype = OP_ALG_ALGSEL_AES;
        aeaddata.algmode = OP_ALG_AAI_GCM;
        session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
    case RTE_CRYPTO_AEAD_AES_CCM:
        DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
        DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",

    session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?

    priv->flc_desc[0].desc[0] = aeaddata.keylen;
    err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                   (unsigned int *)priv->flc_desc[0].desc,
                   &priv->flc_desc[0].desc[1], 1);
        DPAA2_SEC_ERR("Crypto: Incorrect key lengths");

    if (priv->flc_desc[0].desc[1] & 1) {
        aeaddata.key_type = RTA_DATA_IMM;
        aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
        aeaddata.key_type = RTA_DATA_PTR;

    priv->flc_desc[0].desc[0] = 0;
    priv->flc_desc[0].desc[1] = 0;

    if (session->dir == DIR_ENC)
        bufsize = cnstr_shdsc_gcm_encap(
                priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
                &aeaddata, session->iv.length,
                session->digest_length);
        bufsize = cnstr_shdsc_gcm_decap(
                priv->flc_desc[0].desc, 1, 0, SHR_NEVER,
                &aeaddata, session->iv.length,
                session->digest_length);
        DPAA2_SEC_ERR("Crypto: Invalid buffer length");

    flc->word1_sdl = (uint8_t)bufsize;
    session->ctxt = priv;
    for (i = 0; i < bufsize; i++)
        DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
                i, priv->flc_desc[0].desc[i]);

    rte_free(session->aead_key.data);
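
/*
 * Illustrative only (editor's sketch, not part of the driver): an AES-GCM
 * session reaching dpaa2_sec_aead_init() would typically be described with
 * an AEAD transform such as the following (the key buffer, IV_OFFSET and the
 * length values are placeholders):
 *
 *	struct rte_crypto_sym_xform aead_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.next = NULL,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = gcm_key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 */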
1939 dpaa2_sec_aead_chain_init(struct rte_cryptodev
*dev
,
1940 struct rte_crypto_sym_xform
*xform
,
1941 dpaa2_sec_session
*session
)
1943 struct dpaa2_sec_aead_ctxt
*ctxt
= &session
->ext_params
.aead_ctxt
;
1944 struct dpaa2_sec_dev_private
*dev_priv
= dev
->data
->dev_private
;
1945 struct alginfo authdata
, cipherdata
;
1947 struct ctxt_priv
*priv
;
1948 struct sec_flow_context
*flc
;
1949 struct rte_crypto_cipher_xform
*cipher_xform
;
1950 struct rte_crypto_auth_xform
*auth_xform
;
1953 PMD_INIT_FUNC_TRACE();
1955 if (session
->ext_params
.aead_ctxt
.auth_cipher_text
) {
1956 cipher_xform
= &xform
->cipher
;
1957 auth_xform
= &xform
->next
->auth
;
1958 session
->ctxt_type
=
1959 (cipher_xform
->op
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
) ?
1960 DPAA2_SEC_CIPHER_HASH
: DPAA2_SEC_HASH_CIPHER
;
1962 cipher_xform
= &xform
->next
->cipher
;
1963 auth_xform
= &xform
->auth
;
1964 session
->ctxt_type
=
1965 (cipher_xform
->op
== RTE_CRYPTO_CIPHER_OP_ENCRYPT
) ?
1966 DPAA2_SEC_HASH_CIPHER
: DPAA2_SEC_CIPHER_HASH
;
1969 /* Set IV parameters */
1970 session
->iv
.offset
= cipher_xform
->iv
.offset
;
1971 session
->iv
.length
= cipher_xform
->iv
.length
;
1973 /* For SEC AEAD only one descriptor is required */
1974 priv
= (struct ctxt_priv
*)rte_zmalloc(NULL
,
1975 sizeof(struct ctxt_priv
) + sizeof(struct sec_flc_desc
),
1976 RTE_CACHE_LINE_SIZE
);
1978 DPAA2_SEC_ERR("No Memory for priv CTXT");
1982 priv
->fle_pool
= dev_priv
->fle_pool
;
1983 flc
= &priv
->flc_desc
[0].flc
;
1985 session
->cipher_key
.data
= rte_zmalloc(NULL
, cipher_xform
->key
.length
,
1986 RTE_CACHE_LINE_SIZE
);
1987 if (session
->cipher_key
.data
== NULL
&& cipher_xform
->key
.length
> 0) {
1988 DPAA2_SEC_ERR("No Memory for cipher key");
1992 session
->cipher_key
.length
= cipher_xform
->key
.length
;
1993 session
->auth_key
.data
= rte_zmalloc(NULL
, auth_xform
->key
.length
,
1994 RTE_CACHE_LINE_SIZE
);
1995 if (session
->auth_key
.data
== NULL
&& auth_xform
->key
.length
> 0) {
1996 DPAA2_SEC_ERR("No Memory for auth key");
1997 rte_free(session
->cipher_key
.data
);
2001 session
->auth_key
.length
= auth_xform
->key
.length
;
2002 memcpy(session
->cipher_key
.data
, cipher_xform
->key
.data
,
2003 cipher_xform
->key
.length
);
2004 memcpy(session
->auth_key
.data
, auth_xform
->key
.data
,
2005 auth_xform
->key
.length
);
2007 authdata
.key
= (size_t)session
->auth_key
.data
;
2008 authdata
.keylen
= session
->auth_key
.length
;
2009 authdata
.key_enc_flags
= 0;
2010 authdata
.key_type
= RTA_DATA_IMM
;
2012 session
->digest_length
= auth_xform
->digest_length
;
	switch (auth_xform->algo) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA1;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_MD5;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA224;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA256;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA384;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata.algtype = OP_ALG_ALGSEL_SHA512;
		authdata.algmode = OP_ALG_AAI_HMAC;
		session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_NULL:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CMAC:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      auth_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      auth_xform->algo);
		goto error_out;
	}
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;
	switch (cipher_xform->algo) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata.algtype = OP_ALG_ALGSEL_3DES;
		cipherdata.algmode = OP_ALG_AAI_CBC;
		session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = OP_ALG_ALGSEL_AES;
		cipherdata.algmode = OP_ALG_AAI_CTR;
		session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_NULL:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      cipher_xform->algo);
		goto error_out;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      cipher_xform->algo);
		goto error_out;
	}
	session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
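	/*
	 * Decide whether each key is small enough to be embedded (immediate)
	 * in the shared descriptor or has to be referenced by pointer.
	 * rta_inline_query() writes its verdict into desc[2]: bit 0 covers
	 * the cipher key, bit 1 the authentication key.
	 */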
	priv->flc_desc[0].desc[0] = cipherdata.keylen;
	priv->flc_desc[0].desc[1] = authdata.keylen;
	err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
			       MIN_JOB_DESC_SIZE,
			       (unsigned int *)priv->flc_desc[0].desc,
			       &priv->flc_desc[0].desc[2], 2);

	if (err < 0) {
		DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
		goto error_out;
	}
	if (priv->flc_desc[0].desc[2] & 1) {
		cipherdata.key_type = RTA_DATA_IMM;
	} else {
		cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
		cipherdata.key_type = RTA_DATA_PTR;
	}
	if (priv->flc_desc[0].desc[2] & (1 << 1)) {
		authdata.key_type = RTA_DATA_IMM;
	} else {
		authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
		authdata.key_type = RTA_DATA_PTR;
	}
	priv->flc_desc[0].desc[0] = 0;
	priv->flc_desc[0].desc[1] = 0;
	priv->flc_desc[0].desc[2] = 0;
	if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
		bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
					      0, &cipherdata, &authdata,
					      session->iv.length,
					      ctxt->auth_only_len,
					      session->digest_length,
					      session->dir);
		if (bufsize < 0) {
			DPAA2_SEC_ERR("Crypto: Invalid buffer length");
			goto error_out;
		}
	} else {
		DPAA2_SEC_ERR("Hash before cipher not supported");
		goto error_out;
	}
	flc->word1_sdl = (uint8_t)bufsize;
	session->ctxt = priv;
	for (i = 0; i < bufsize; i++)
		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
				i, priv->flc_desc[0].desc[i]);

	return 0;

error_out:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	return -1;
}
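/*
 * Top-level symmetric session setup: inspects the xform chain and dispatches
 * to the cipher-only, auth-only, cipher+auth chain or AEAD init routine,
 * recording the resulting context type in the session.
 */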
static int
dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
			struct rte_crypto_sym_xform *xform, void *sess)
{
	dpaa2_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA2_SEC_ERR("Invalid session struct");
		return -EINVAL;
	}

	memset(session, 0, sizeof(dpaa2_sec_session));
	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_CIPHER;
		dpaa2_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->ctxt_type = DPAA2_SEC_AUTH;
		dpaa2_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		dpaa2_sec_aead_chain_init(dev, xform, session);

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa2_sec_aead_init(dev, xform, session);

	} else {
		DPAA2_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	return 0;
}
static int
dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
			  dpaa2_sec_session *session,
			  struct alginfo *aeaddata)
{
	PMD_INIT_FUNC_TRACE();

	session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
		DPAA2_SEC_ERR("No Memory for aead key");
		return -1;
	}
	memcpy(session->aead_key.data, aead_xform->key.data,
	       aead_xform->key.length);

	session->digest_length = aead_xform->digest_length;
	session->aead_key.length = aead_xform->key.length;

	aeaddata->key = (size_t)session->aead_key.data;
	aeaddata->keylen = session->aead_key.length;
	aeaddata->key_enc_flags = 0;
	aeaddata->key_type = RTA_DATA_IMM;

	switch (aead_xform->algo) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		aeaddata->algtype = OP_ALG_ALGSEL_AES;
		aeaddata->algmode = OP_ALG_AAI_GCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
		break;
	case RTE_CRYPTO_AEAD_AES_CCM:
		aeaddata->algtype = OP_ALG_ALGSEL_AES;
		aeaddata->algmode = OP_ALG_AAI_CCM;
		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
		break;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
			      aead_xform->algo);
		return -1;
	}
	session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;

	return 0;
}
static int
dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
			   struct rte_crypto_auth_xform *auth_xform,
			   dpaa2_sec_session *session,
			   struct alginfo *cipherdata,
			   struct alginfo *authdata)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
						cipher_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA2_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}

		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
		       cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA2_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}
	authdata->key = (size_t)session->auth_key.data;
	authdata->keylen = session->auth_key.length;
	authdata->key_enc_flags = 0;
	authdata->key_type = RTA_DATA_IMM;
	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
		authdata->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
		authdata->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		authdata->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		authdata->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		authdata->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
			      session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
			      session->auth_alg);
		return -ENOTSUP;
	}
	cipherdata->key = (size_t)session->cipher_key.data;
	cipherdata->keylen = session->cipher_key.length;
	cipherdata->key_enc_flags = 0;
	cipherdata->key_type = RTA_DATA_IMM;

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
		cipherdata->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		cipherdata->algtype = OP_PCL_IPSEC_3DES;
		cipherdata->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
		cipherdata->algmode = OP_ALG_AAI_CTR;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata->algtype = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}
#ifdef RTE_LIBRTE_SECURITY_TEST
static uint8_t aes_cbc_iv[] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
#endif
static int
dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
			    struct rte_security_session_conf *conf,
			    void *sess)
{
	struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_aead_xform *aead_xform = NULL;
	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
	struct ctxt_priv *priv;
	struct ipsec_encap_pdb encap_pdb;
	struct ipsec_decap_pdb decap_pdb;
	struct alginfo authdata, cipherdata;
	int bufsize;
	struct sec_flow_context *flc;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	int ret = -1;

	PMD_INIT_FUNC_TRACE();

	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
				sizeof(struct ctxt_priv) +
				sizeof(struct sec_flc_desc),
				RTE_CACHE_LINE_SIZE);

	if (priv == NULL) {
		DPAA2_SEC_ERR("No memory for priv CTXT");
		return -ENOMEM;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;

	memset(session, 0, sizeof(dpaa2_sec_session));
	if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &conf->crypto_xform->cipher;
		if (conf->crypto_xform->next)
			auth_xform = &conf->crypto_xform->next->auth;
		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
					session, &cipherdata, &authdata);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &conf->crypto_xform->auth;
		if (conf->crypto_xform->next)
			cipher_xform = &conf->crypto_xform->next->cipher;
		ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
					session, &cipherdata, &authdata);
	} else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		aead_xform = &conf->crypto_xform->aead;
		ret = dpaa2_sec_ipsec_aead_init(aead_xform,
					session, &cipherdata);
	} else {
		DPAA2_SEC_ERR("XFORM not specified");
		ret = -EINVAL;
		goto out;
	}
	if (ret) {
		DPAA2_SEC_ERR("Failed to process xform");
		goto out;
	}

	session->ctxt_type = DPAA2_SEC_IPSEC;
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		struct ip ip4_hdr;

		flc->dhr = SEC_FLC_DHR_OUTBOUND;
		ip4_hdr.ip_v = IPVERSION;
		ip4_hdr.ip_hl = 5;
		ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
		ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
		ip4_hdr.ip_id = 0;
		ip4_hdr.ip_off = 0;
		ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
		ip4_hdr.ip_p = IPPROTO_ESP;
		ip4_hdr.ip_sum = 0;
		ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
		ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
		ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
					     sizeof(struct ip));

		/* For Sec Proto only one descriptor is required. */
		memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
		encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		encap_pdb.spi = ipsec_xform->spi;
		encap_pdb.ip_hdr_len = sizeof(struct ip);

		session->dir = DIR_ENC;
		bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
				1, 0, SHR_SERIAL, &encap_pdb,
				(uint8_t *)&ip4_hdr,
				&cipherdata, &authdata);
	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		flc->dhr = SEC_FLC_DHR_INBOUND;
		memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
		decap_pdb.options = sizeof(struct ip) << 16;
		session->dir = DIR_DEC;
		bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
				1, 0, SHR_SERIAL,
				&decap_pdb, &cipherdata, &authdata);
	} else {
		goto out;
	}

	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
		goto out;
	}

	flc->word1_sdl = (uint8_t)bufsize;
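	/*
	 * Program the flow context so SEC completions land on the first
	 * queue pair: words 2-3 carry the address of that queue pair's
	 * rx_vq, and the low-order bits ORed in (0x14) hold stashing-related
	 * settings enabled by DPAA2_SET_FLC_RSC() below.
	 */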
	/* Enable the stashing control bit */
	DPAA2_SET_FLC_RSC(flc);
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq) | 0x14);
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));

	/* Set EWS bit i.e. enable write-safe */
	DPAA2_SET_FLC_EWS(flc);
	/* Set BS = 1 i.e reuse input buffers as output buffers */
	DPAA2_SET_FLC_REUSE_BS(flc);
	/* Set FF = 10; reuse input buffers if they provide sufficient space */
	DPAA2_SET_FLC_REUSE_FF(flc);

	session->ctxt = priv;

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return ret;
}
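/*
 * PDCP (rte_security) session setup: validates the xform chain, copies the
 * cipher/auth keys, and builds either a control-plane or user-plane PDCP
 * shared descriptor depending on pdcp_xform->domain.
 */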
static int
dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
			   struct rte_security_session_conf *conf,
			   void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform;
	dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
	struct ctxt_priv *priv;
	struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
	struct alginfo authdata, cipherdata;
	int bufsize = -1;
	struct sec_flow_context *flc;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = true;
#else
	int swap = false;
#endif

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa2_sec_session));

	priv = (struct ctxt_priv *)rte_zmalloc(NULL,
				sizeof(struct ctxt_priv) +
				sizeof(struct sec_flc_desc),
				RTE_CACHE_LINE_SIZE);

	if (priv == NULL) {
		DPAA2_SEC_ERR("No memory for priv CTXT");
		return -ENOMEM;
	}

	priv->fle_pool = dev_priv->fle_pool;
	flc = &priv->flc_desc[0].flc;
	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		cipher_xform = &xform->cipher;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		session->ext_params.aead_ctxt.auth_cipher_text = true;
		cipher_xform = &xform->cipher;
		auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		session->ext_params.aead_ctxt.auth_cipher_text = false;
		cipher_xform = &xform->next->cipher;
		auth_xform = &xform->auth;
	} else {
		DPAA2_SEC_ERR("Invalid crypto type");
		rte_free(priv);
		return -EINVAL;
	}

	session->ctxt_type = DPAA2_SEC_PDCP;
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
					cipher_xform->key.length,
					RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA2_SEC_ERR("No Memory for cipher key");
			rte_free(priv);
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
		       cipher_xform->key.length);
		session->dir =
			(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
#ifdef ENABLE_HFN_OVERRIDE
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
#endif
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	cipherdata.key = (size_t)session->cipher_key.data;
	cipherdata.keylen = session->cipher_key.length;
	cipherdata.key_enc_flags = 0;
	cipherdata.key_type = RTA_DATA_IMM;

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
		cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
		break;
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
		cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
		break;
	default:
		DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      session->cipher_alg);
		goto out;
	}
	/* Auth is only applicable for control mode operation. */
	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
			DPAA2_SEC_ERR(
				"PDCP Seq Num size should be 5 bits for cmode");
			goto out;
		}
		if (auth_xform) {
			session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
			if (session->auth_key.data == NULL &&
					auth_xform->key.length > 0) {
				DPAA2_SEC_ERR("No Memory for auth key");
				rte_free(session->cipher_key.data);
				rte_free(priv);
				return -ENOMEM;
			}
			session->auth_key.length = auth_xform->key.length;
			memcpy(session->auth_key.data, auth_xform->key.data,
			       auth_xform->key.length);
			session->auth_alg = auth_xform->algo;
		} else {
			session->auth_key.data = NULL;
			session->auth_key.length = 0;
			session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		}
		authdata.key = (size_t)session->auth_key.data;
		authdata.keylen = session->auth_key.length;
		authdata.key_enc_flags = 0;
		authdata.key_type = RTA_DATA_IMM;

		switch (session->auth_alg) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			authdata.algtype = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			authdata.algtype = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			authdata.algtype = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			authdata.algtype = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
				      session->auth_alg);
			goto out;
		}
		if (session->dir == DIR_ENC)
			bufsize = cnstr_shdsc_pdcp_c_plane_encap(
					priv->flc_desc[0].desc, 1, swap,
					pdcp_xform->hfn,
					pdcp_xform->bearer,
					pdcp_xform->pkt_dir,
					pdcp_xform->hfn_threshold,
					&cipherdata, &authdata,
					0);
		else if (session->dir == DIR_DEC)
			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
					priv->flc_desc[0].desc, 1, swap,
					pdcp_xform->hfn,
					pdcp_xform->bearer,
					pdcp_xform->pkt_dir,
					pdcp_xform->hfn_threshold,
					&cipherdata, &authdata,
					0);
	} else {
		if (session->dir == DIR_ENC)
			bufsize = cnstr_shdsc_pdcp_u_plane_encap(
					priv->flc_desc[0].desc, 1, swap,
					(enum pdcp_sn_size)pdcp_xform->sn_size,
					pdcp_xform->hfn,
					pdcp_xform->bearer,
					pdcp_xform->pkt_dir,
					pdcp_xform->hfn_threshold,
					&cipherdata, 0);
		else if (session->dir == DIR_DEC)
			bufsize = cnstr_shdsc_pdcp_u_plane_decap(
					priv->flc_desc[0].desc, 1, swap,
					(enum pdcp_sn_size)pdcp_xform->sn_size,
					pdcp_xform->hfn,
					pdcp_xform->bearer,
					pdcp_xform->pkt_dir,
					pdcp_xform->hfn_threshold,
					&cipherdata, 0);
	}

	if (bufsize < 0) {
		DPAA2_SEC_ERR("Crypto: Invalid buffer length");
		goto out;
	}
	/* Enable the stashing control bit */
	DPAA2_SET_FLC_RSC(flc);
	flc->word2_rflc_31_0 = lower_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq) | 0x14);
	flc->word3_rflc_63_32 = upper_32_bits(
			(size_t)&(((struct dpaa2_sec_qp *)
			dev->data->queue_pairs[0])->rx_vq));

	flc->word1_sdl = (uint8_t)bufsize;

	/* Set EWS bit i.e. enable write-safe */
	DPAA2_SET_FLC_EWS(flc);
	/* Set BS = 1 i.e reuse input buffers as output buffers */
	DPAA2_SET_FLC_REUSE_BS(flc);
	/* Set FF = 10; reuse input buffers if they provide sufficient space */
	DPAA2_SET_FLC_REUSE_FF(flc);

	session->ctxt = priv;

	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	rte_free(priv);
	return -1;
}
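/*
 * rte_security session entry point: private session data is taken from the
 * caller-supplied mempool and filled in according to conf->protocol
 * (IPsec or PDCP; MACsec is not supported by this driver).
 */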
static int
dpaa2_sec_security_session_create(void *dev,
				  struct rte_security_session_conf *conf,
				  struct rte_security_session *sess,
				  struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA2_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa2_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		return -ENOTSUP;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa2_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	default:
		return -EINVAL;
	}
	if (ret != 0) {
		DPAA2_SEC_ERR("Failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}
/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa2_sec_security_session_destroy(void *dev __rte_unused,
				   struct rte_security_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	void *sess_priv = get_sec_session_private_data(sess);

	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
		set_sec_session_private_data(sess, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
	return 0;
}
static int
dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA2_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		DPAA2_SEC_ERR("Failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
				     sess_private_data);

	return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);
	dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;

	if (sess_priv) {
		rte_free(s->ctxt);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa2_sec_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}
static int
dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
			struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}
static int
dpaa2_sec_dev_start(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_attr attr;
	struct dpaa2_queue *dpaa2_q;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_tx_queue_attr tx_attr;
	int ret, i;

	PMD_INIT_FUNC_TRACE();

	memset(&attr, 0, sizeof(struct dpseci_attr));

	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
			      priv->hw_id);
		goto get_attr_failure;
	}
	ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
	if (ret) {
		DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
		goto get_attr_failure;
	}
	for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->rx_vq;
		dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &rx_attr);
		dpaa2_q->fqid = rx_attr.fqid;
		DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
	}
	for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
		dpaa2_q = &qp[i]->tx_vq;
		dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
				    &tx_attr);
		dpaa2_q->fqid = tx_attr.fqid;
		DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
	}

	return 0;
get_attr_failure:
	dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	return -1;
}
static void
dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
			      priv->hw_id);
		return;
	}

	ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
	if (ret < 0) {
		DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
		return;
	}
}
static int
dpaa2_sec_dev_close(struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Function is reverse of dpaa2_sec_dev_init.
	 * It does the following:
	 * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
	 * 2. Close the DPSECI device
	 * 3. Free the allocated resources.
	 */

	/*Close the device at underlying layer*/
	ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
	if (ret) {
		DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
		return -1;
	}

	/*Free the allocated memory for ethernet private data and dpseci*/
	priv->hw = NULL;
	rte_free(dpseci);

	return 0;
}
static void
dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
			struct rte_cryptodev_info *info)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa2_sec_capabilities;
		/* No limit of number of sessions */
		info->sym.max_nb_sessions = 0;
		info->driver_id = cryptodev_driver_id;
	}
}
static
void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
			 struct rte_cryptodev_stats *stats)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_sec_counters counters = {0};
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
					dev->data->queue_pairs;
	int ret, i;

	PMD_INIT_FUNC_TRACE();
	if (stats == NULL) {
		DPAA2_SEC_ERR("Invalid stats ptr NULL");
		return;
	}
	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}

		stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
		stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
		stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
		stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
	}

	ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
				      &counters);
	if (ret) {
		DPAA2_SEC_ERR("SEC counters failed");
	} else {
		DPAA2_SEC_INFO("dpseci hardware stats:"
			"\n\tNum of Requests Dequeued = %" PRIu64
			"\n\tNum of Outbound Encrypt Requests = %" PRIu64
			"\n\tNum of Inbound Decrypt Requests = %" PRIu64
			"\n\tNum of Outbound Bytes Encrypted = %" PRIu64
			"\n\tNum of Outbound Bytes Protected = %" PRIu64
			"\n\tNum of Inbound Bytes Decrypted = %" PRIu64
			"\n\tNum of Inbound Bytes Validated = %" PRIu64,
			counters.dequeued_requests,
			counters.ob_enc_requests,
			counters.ib_dec_requests,
			counters.ob_enc_bytes,
			counters.ob_prot_bytes,
			counters.ib_dec_bytes,
			counters.ib_valid_bytes);
	}
}
static
void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
{
	int i;
	struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
				   (dev->data->queue_pairs);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		if (qp[i] == NULL) {
			DPAA2_SEC_DEBUG("Uninitialised queue pair");
			continue;
		}
		qp[i]->tx_vq.rx_pkts = 0;
		qp[i]->tx_vq.tx_pkts = 0;
		qp[i]->tx_vq.err_pkts = 0;
		qp[i]->rx_vq.rx_pkts = 0;
		qp[i]->rx_vq.tx_pkts = 0;
		qp[i]->rx_vq.err_pkts = 0;
	}
}
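/*
 * Event-mode dequeue callbacks, run from the eventdev dequeue path for
 * frames arriving on a DPSECI rx queue. The parallel variant consumes the
 * DQRR entry immediately; the atomic variant records the DQRR index in
 * per-lcore state so the entry is only released together with the crypto
 * op, preserving atomic ordering.
 */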
static void __attribute__((hot))
dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
				 const struct qbman_fd *fd,
				 const struct qbman_result *dq,
				 struct dpaa2_queue *rxq,
				 struct rte_event *ev)
{
	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;
	ev->event_ptr = sec_fd_to_mbuf(fd);

	qbman_swp_dqrr_consume(swp, dq);
}
static void
dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
			       const struct qbman_fd *fd,
			       const struct qbman_result *dq,
			       struct dpaa2_queue *rxq,
			       struct rte_event *ev)
{
	uint8_t dqrr_index;
	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
	/* Prefetching mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));

	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));

	ev->flow_id = rxq->ev.flow_id;
	ev->sub_event_type = rxq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = rxq->ev.sched_type;
	ev->queue_id = rxq->ev.queue_id;
	ev->priority = rxq->ev.priority;

	ev->event_ptr = sec_fd_to_mbuf(fd);
	dqrr_index = qbman_get_dqrr_idx(dq);
	crypto_op->sym->m_src->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
}
int
dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
			int qp_id,
			uint16_t dpcon_id,
			const struct rte_event *event)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct dpseci_rx_queue_cfg cfg;
	int ret;

	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
	else
		return -EINVAL;

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
	cfg.options = DPSECI_QUEUE_OPT_DEST;
	cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
	cfg.dest_cfg.dest_id = dpcon_id;
	cfg.dest_cfg.priority = event->priority;

	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
	cfg.user_ctx = (size_t)(qp);
	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
		cfg.order_preservation_en = 1;
	}
	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				  qp_id, &cfg);
	if (ret) {
		RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
		return ret;
	}

	memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));

	return 0;
}
int
dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
	struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
	struct dpseci_rx_queue_cfg cfg;
	int ret;

	memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
	cfg.options = DPSECI_QUEUE_OPT_DEST;
	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;

	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
				  qp_id, &cfg);
	if (ret)
		RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);

	return ret;
}
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure = dpaa2_sec_dev_configure,
	.dev_start = dpaa2_sec_dev_start,
	.dev_stop = dpaa2_sec_dev_stop,
	.dev_close = dpaa2_sec_dev_close,
	.dev_infos_get = dpaa2_sec_dev_infos_get,
	.stats_get = dpaa2_sec_stats_get,
	.stats_reset = dpaa2_sec_stats_reset,
	.queue_pair_setup = dpaa2_sec_queue_pair_setup,
	.queue_pair_release = dpaa2_sec_queue_pair_release,
	.queue_pair_count = dpaa2_sec_queue_pair_count,
	.sym_session_get_size = dpaa2_sec_sym_session_get_size,
	.sym_session_configure = dpaa2_sec_sym_session_configure,
	.sym_session_clear = dpaa2_sec_sym_session_clear,
};
static const struct rte_security_capability *
dpaa2_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa2_sec_security_cap;
}
= {
3241 .session_create
= dpaa2_sec_security_session_create
,
3242 .session_update
= NULL
,
3243 .session_stats_get
= NULL
,
3244 .session_destroy
= dpaa2_sec_security_session_destroy
,
3245 .set_pkt_metadata
= NULL
,
3246 .capabilities_get
= dpaa2_sec_capabilities_get
static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
	struct dpaa2_sec_dev_private *internals = dev->data->dev_private;

	rte_free(dev->security_ctx);

	rte_mempool_free(internals->fle_pool);

	DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
		       dev->data->name, rte_socket_id());

	return 0;
}
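/*
 * Per-device initialisation: hook up the cryptodev ops and burst functions,
 * open the DPSECI object through the MC portal, and create the FLE pool
 * used by the data path. Secondary processes return early since the
 * primary has already done the device-level setup.
 */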
static int
dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa2_sec_dev_private *internals;
	struct rte_device *dev = cryptodev->device;
	struct rte_dpaa2_device *dpaa2_dev;
	struct rte_security_ctx *security_instance;
	struct fsl_mc_io *dpseci;
	uint16_t token;
	struct dpseci_attr attr;
	int retcode, hw_id;
	char str[30];

	PMD_INIT_FUNC_TRACE();
	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
	if (dpaa2_dev == NULL) {
		DPAA2_SEC_ERR("DPAA2 SEC device not found");
		return -1;
	}
	hw_id = dpaa2_dev->object_id;

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA2_SEC_DEBUG("Device already init by primary process");
		return 0;
	}

	/* Initialize security_ctx only for primary process*/
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa2_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;

	/*Open the rte device via MC and save the handle for further use*/
	dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
				sizeof(struct fsl_mc_io), 0);
	if (!dpseci) {
		DPAA2_SEC_ERR(
			"Error in allocating the memory for dpsec object");
		return -ENOMEM;
	}
	dpseci->regs = rte_mcp_ptr_list[0];

	retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
	if (retcode != 0) {
		DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
			      retcode);
		goto init_error;
	}
	retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
	if (retcode != 0) {
		DPAA2_SEC_ERR(
			"Cannot get dpsec device attributed: Error = %x",
			retcode);
		goto init_error;
	}
	snprintf(cryptodev->data->name, sizeof(cryptodev->data->name),
		 "dpsec-%u", hw_id);

	internals->max_nb_queue_pairs = attr.num_tx_queues;
	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
	internals->hw = dpseci;
	internals->token = token;

	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
		 getpid(), cryptodev->data->dev_id);
	internals->fle_pool = rte_mempool_create((const char *)str,
			FLE_POOL_NUM_BUFS,
			FLE_POOL_BUF_SIZE,
			FLE_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->fle_pool) {
		DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
		goto init_error;
	}

	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
	return 0;

init_error:
	DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	/* dpaa2_sec_uninit(crypto_dev_name); */
	return -EFAULT;
}
static int
cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
			  struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpsec-%d",
		 dpaa2_dev->object_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa2_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
				  "device data");
	}

	dpaa2_dev->cryptodev = cryptodev;
	cryptodev->device = &dpaa2_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* Invoke PMD device initialization function */
	retval = dpaa2_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}
static int
cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa2_dev->cryptodev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa2_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
	.drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
	.drv_type = DPAA2_CRYPTO,
	.driver = {
		.name = "DPAA2 SEC PMD"
	},
	.probe = cryptodev_dpaa2_sec_probe,
	.remove = cryptodev_dpaa2_sec_remove,
};
static struct cryptodev_driver dpaa2_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
RTE_INIT(dpaa2_sec_init_log)
{
	/* Bus level logs */
	dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
	if (dpaa2_logtype_sec >= 0)
		rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
}