/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
6 #include <rte_bus_pci.h>
7 #include <rte_cryptodev.h>
8 #include <rte_cryptodev_pmd.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
13 #include "cpt_pmd_logs.h"
14 #include "cpt_ucode.h"
16 #include "otx_cryptodev.h"
17 #include "otx_cryptodev_capabilities.h"
18 #include "otx_cryptodev_hw_access.h"
19 #include "otx_cryptodev_ops.h"
21 /* Forward declarations */
24 otx_cpt_que_pair_release(struct rte_cryptodev
*dev
, uint16_t que_pair_id
);
29 otx_cpt_alarm_cb(void *arg
)
31 struct cpt_vf
*cptvf
= arg
;
32 otx_cpt_poll_misc(cptvf
);
33 rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS
* 1000,
34 otx_cpt_alarm_cb
, cptvf
);
38 otx_cpt_periodic_alarm_start(void *arg
)
40 return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS
* 1000,
41 otx_cpt_alarm_cb
, arg
);
45 otx_cpt_periodic_alarm_stop(void *arg
)
47 return rte_eal_alarm_cancel(otx_cpt_alarm_cb
, arg
);
53 otx_cpt_dev_config(struct rte_cryptodev
*dev __rte_unused
,
54 struct rte_cryptodev_config
*config __rte_unused
)
56 CPT_PMD_INIT_FUNC_TRACE();
61 otx_cpt_dev_start(struct rte_cryptodev
*c_dev
)
63 void *cptvf
= c_dev
->data
->dev_private
;
65 CPT_PMD_INIT_FUNC_TRACE();
67 return otx_cpt_start_device(cptvf
);
71 otx_cpt_dev_stop(struct rte_cryptodev
*c_dev
)
73 void *cptvf
= c_dev
->data
->dev_private
;
75 CPT_PMD_INIT_FUNC_TRACE();
77 otx_cpt_stop_device(cptvf
);
81 otx_cpt_dev_close(struct rte_cryptodev
*c_dev
)
83 void *cptvf
= c_dev
->data
->dev_private
;
86 CPT_PMD_INIT_FUNC_TRACE();
88 for (i
= 0; i
< c_dev
->data
->nb_queue_pairs
; i
++) {
89 ret
= otx_cpt_que_pair_release(c_dev
, i
);
94 otx_cpt_periodic_alarm_stop(cptvf
);
95 otx_cpt_deinit_device(cptvf
);
101 otx_cpt_dev_info_get(struct rte_cryptodev
*dev
, struct rte_cryptodev_info
*info
)
103 CPT_PMD_INIT_FUNC_TRACE();
105 info
->max_nb_queue_pairs
= CPT_NUM_QS_PER_VF
;
106 info
->feature_flags
= dev
->feature_flags
;
107 info
->capabilities
= otx_get_capabilities();
108 info
->sym
.max_nb_sessions
= 0;
109 info
->driver_id
= otx_cryptodev_driver_id
;
110 info
->min_mbuf_headroom_req
= OTX_CPT_MIN_HEADROOM_REQ
;
111 info
->min_mbuf_tailroom_req
= OTX_CPT_MIN_TAILROOM_REQ
;
116 otx_cpt_stats_get(struct rte_cryptodev
*dev __rte_unused
,
117 struct rte_cryptodev_stats
*stats __rte_unused
)
119 CPT_PMD_INIT_FUNC_TRACE();
123 otx_cpt_stats_reset(struct rte_cryptodev
*dev __rte_unused
)
125 CPT_PMD_INIT_FUNC_TRACE();
129 otx_cpt_que_pair_setup(struct rte_cryptodev
*dev
,
130 uint16_t que_pair_id
,
131 const struct rte_cryptodev_qp_conf
*qp_conf
,
132 int socket_id __rte_unused
)
134 struct cpt_instance
*instance
= NULL
;
135 struct rte_pci_device
*pci_dev
;
138 CPT_PMD_INIT_FUNC_TRACE();
140 if (dev
->data
->queue_pairs
[que_pair_id
] != NULL
) {
141 ret
= otx_cpt_que_pair_release(dev
, que_pair_id
);
146 if (qp_conf
->nb_descriptors
> DEFAULT_CMD_QLEN
) {
147 CPT_LOG_INFO("Number of descriptors too big %d, using default "
148 "queue length of %d", qp_conf
->nb_descriptors
,
152 pci_dev
= RTE_DEV_TO_PCI(dev
->device
);
154 if (pci_dev
->mem_resource
[0].addr
== NULL
) {
155 CPT_LOG_ERR("PCI mem address null");
159 ret
= otx_cpt_get_resource(dev
, 0, &instance
, que_pair_id
);
160 if (ret
!= 0 || instance
== NULL
) {
161 CPT_LOG_ERR("Error getting instance handle from device %s : "
162 "ret = %d", dev
->data
->name
, ret
);
166 instance
->queue_id
= que_pair_id
;
167 instance
->sess_mp
= qp_conf
->mp_session
;
168 instance
->sess_mp_priv
= qp_conf
->mp_session_private
;
169 dev
->data
->queue_pairs
[que_pair_id
] = instance
;
175 otx_cpt_que_pair_release(struct rte_cryptodev
*dev
, uint16_t que_pair_id
)
177 struct cpt_instance
*instance
= dev
->data
->queue_pairs
[que_pair_id
];
180 CPT_PMD_INIT_FUNC_TRACE();
182 ret
= otx_cpt_put_resource(instance
);
184 CPT_LOG_ERR("Error putting instance handle of device %s : "
185 "ret = %d", dev
->data
->name
, ret
);
189 dev
->data
->queue_pairs
[que_pair_id
] = NULL
;
195 otx_cpt_get_session_size(struct rte_cryptodev
*dev __rte_unused
)
197 return cpt_get_session_size();
201 otx_cpt_session_init(void *sym_sess
, uint8_t driver_id
)
203 struct rte_cryptodev_sym_session
*sess
= sym_sess
;
204 struct cpt_sess_misc
*cpt_sess
=
205 (struct cpt_sess_misc
*) get_sym_session_private_data(sess
, driver_id
);
207 CPT_PMD_INIT_FUNC_TRACE();
208 cpt_sess
->ctx_dma_addr
= rte_mempool_virt2iova(cpt_sess
) +
209 sizeof(struct cpt_sess_misc
);
213 otx_cpt_session_cfg(struct rte_cryptodev
*dev
,
214 struct rte_crypto_sym_xform
*xform
,
215 struct rte_cryptodev_sym_session
*sess
,
216 struct rte_mempool
*mempool
)
218 struct rte_crypto_sym_xform
*chain
;
219 void *sess_private_data
= NULL
;
221 CPT_PMD_INIT_FUNC_TRACE();
223 if (cpt_is_algo_supported(xform
))
226 if (unlikely(sess
== NULL
)) {
227 CPT_LOG_ERR("invalid session struct");
231 if (rte_mempool_get(mempool
, &sess_private_data
)) {
232 CPT_LOG_ERR("Could not allocate sess_private_data");
238 switch (chain
->type
) {
239 case RTE_CRYPTO_SYM_XFORM_AEAD
:
240 if (fill_sess_aead(chain
, sess_private_data
))
243 case RTE_CRYPTO_SYM_XFORM_CIPHER
:
244 if (fill_sess_cipher(chain
, sess_private_data
))
247 case RTE_CRYPTO_SYM_XFORM_AUTH
:
248 if (chain
->auth
.algo
== RTE_CRYPTO_AUTH_AES_GMAC
) {
249 if (fill_sess_gmac(chain
, sess_private_data
))
252 if (fill_sess_auth(chain
, sess_private_data
))
257 CPT_LOG_ERR("Invalid crypto xform type");
262 set_sym_session_private_data(sess
, dev
->driver_id
, sess_private_data
);
263 otx_cpt_session_init(sess
, dev
->driver_id
);
267 if (sess_private_data
)
268 rte_mempool_put(mempool
, sess_private_data
);
273 otx_cpt_session_clear(struct rte_cryptodev
*dev
,
274 struct rte_cryptodev_sym_session
*sess
)
276 void *sess_priv
= get_sym_session_private_data(sess
, dev
->driver_id
);
278 CPT_PMD_INIT_FUNC_TRACE();
280 memset(sess_priv
, 0, otx_cpt_get_session_size(dev
));
281 struct rte_mempool
*sess_mp
= rte_mempool_from_obj(sess_priv
);
282 set_sym_session_private_data(sess
, dev
->driver_id
, NULL
);
283 rte_mempool_put(sess_mp
, sess_priv
);
287 static __rte_always_inline
int32_t __hot
288 otx_cpt_request_enqueue(struct cpt_instance
*instance
,
289 struct pending_queue
*pqueue
,
292 struct cpt_request_info
*user_req
= (struct cpt_request_info
*)req
;
294 if (unlikely(pqueue
->pending_count
>= DEFAULT_CMD_QLEN
))
297 fill_cpt_inst(instance
, req
);
299 CPT_LOG_DP_DEBUG("req: %p op: %p ", req
, user_req
->op
);
301 /* Fill time_out cycles */
302 user_req
->time_out
= rte_get_timer_cycles() +
303 DEFAULT_COMMAND_TIMEOUT
* rte_get_timer_hz();
304 user_req
->extra_time
= 0;
306 /* Default mode of software queue */
307 mark_cpt_inst(instance
);
309 pqueue
->rid_queue
[pqueue
->enq_tail
].rid
= (uintptr_t)user_req
;
311 /* We will use soft queue length here to limit requests */
312 MOD_INC(pqueue
->enq_tail
, DEFAULT_CMD_QLEN
);
313 pqueue
->pending_count
+= 1;
315 CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
316 "op: %p", user_req
, user_req
->op
);
320 static __rte_always_inline
int __hot
321 otx_cpt_enq_single_sym(struct cpt_instance
*instance
,
322 struct rte_crypto_op
*op
,
323 struct pending_queue
*pqueue
)
325 struct cpt_sess_misc
*sess
;
326 struct rte_crypto_sym_op
*sym_op
= op
->sym
;
327 void *prep_req
, *mdata
= NULL
;
331 sess
= (struct cpt_sess_misc
*)
332 get_sym_session_private_data(sym_op
->session
,
333 otx_cryptodev_driver_id
);
335 cpt_op
= sess
->cpt_op
;
337 if (likely(cpt_op
& CPT_OP_CIPHER_MASK
))
338 ret
= fill_fc_params(op
, sess
, &instance
->meta_info
, &mdata
,
341 ret
= fill_digest_params(op
, sess
, &instance
->meta_info
,
345 CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
346 "ret 0x%x", op
, (unsigned int)cpt_op
, ret
);
350 /* Enqueue prepared instruction to h/w */
351 ret
= otx_cpt_request_enqueue(instance
, pqueue
, prep_req
);
354 /* Buffer allocated for request preparation need to be freed */
355 free_op_meta(mdata
, instance
->meta_info
.pool
);
362 static __rte_always_inline
int __hot
363 otx_cpt_enq_single_sym_sessless(struct cpt_instance
*instance
,
364 struct rte_crypto_op
*op
,
365 struct pending_queue
*pqueue
)
367 struct cpt_sess_misc
*sess
;
368 struct rte_crypto_sym_op
*sym_op
= op
->sym
;
371 void *sess_private_data_t
= NULL
;
373 /* Create tmp session */
375 if (rte_mempool_get(instance
->sess_mp
, (void **)&sess_t
)) {
380 if (rte_mempool_get(instance
->sess_mp_priv
,
381 (void **)&sess_private_data_t
)) {
386 sess
= (struct cpt_sess_misc
*)sess_private_data_t
;
388 sess
->ctx_dma_addr
= rte_mempool_virt2iova(sess
) +
389 sizeof(struct cpt_sess_misc
);
391 ret
= instance_session_cfg(sym_op
->xform
, (void *)sess
);
397 /* Save tmp session in op */
399 sym_op
->session
= (struct rte_cryptodev_sym_session
*)sess_t
;
400 set_sym_session_private_data(sym_op
->session
, otx_cryptodev_driver_id
,
401 sess_private_data_t
);
403 /* Enqueue op with the tmp session set */
404 ret
= otx_cpt_enq_single_sym(instance
, op
, pqueue
);
412 rte_mempool_put(instance
->sess_mp_priv
, sess_private_data_t
);
414 rte_mempool_put(instance
->sess_mp
, sess_t
);
419 static __rte_always_inline
int __hot
420 otx_cpt_enq_single(struct cpt_instance
*inst
,
421 struct rte_crypto_op
*op
,
422 struct pending_queue
*pqueue
)
424 /* Check for the type */
426 if (op
->sess_type
== RTE_CRYPTO_OP_WITH_SESSION
)
427 return otx_cpt_enq_single_sym(inst
, op
, pqueue
);
428 else if (unlikely(op
->sess_type
== RTE_CRYPTO_OP_SESSIONLESS
))
429 return otx_cpt_enq_single_sym_sessless(inst
, op
, pqueue
);
431 /* Should not reach here */
436 otx_cpt_pkt_enqueue(void *qptr
, struct rte_crypto_op
**ops
, uint16_t nb_ops
)
438 struct cpt_instance
*instance
= (struct cpt_instance
*)qptr
;
441 struct cpt_vf
*cptvf
= (struct cpt_vf
*)instance
;
442 struct pending_queue
*pqueue
= &cptvf
->pqueue
;
444 count
= DEFAULT_CMD_QLEN
- pqueue
->pending_count
;
449 while (likely(count
< nb_ops
)) {
451 /* Enqueue single op */
452 ret
= otx_cpt_enq_single(instance
, ops
[count
], pqueue
);
458 otx_cpt_ring_dbell(instance
, count
);
462 static __rte_always_inline
void
463 otx_cpt_dequeue_post_process(struct rte_crypto_op
*cop
, uintptr_t *rsp
)
465 /* H/w has returned success */
466 cop
->status
= RTE_CRYPTO_OP_STATUS_SUCCESS
;
468 /* Perform further post processing */
470 if (cop
->type
== RTE_CRYPTO_OP_TYPE_SYMMETRIC
) {
471 /* Check if auth verify need to be completed */
472 if (unlikely(rsp
[2]))
473 compl_auth_verify(cop
, (uint8_t *)rsp
[2], rsp
[3]);
479 otx_cpt_pkt_dequeue(void *qptr
, struct rte_crypto_op
**ops
, uint16_t nb_ops
)
481 struct cpt_instance
*instance
= (struct cpt_instance
*)qptr
;
482 struct cpt_request_info
*user_req
;
483 struct cpt_vf
*cptvf
= (struct cpt_vf
*)instance
;
486 int i
, count
, pcount
;
489 struct pending_queue
*pqueue
= &cptvf
->pqueue
;
490 struct rte_crypto_op
*cop
;
494 pcount
= pqueue
->pending_count
;
495 count
= (nb_ops
> pcount
) ? pcount
: nb_ops
;
497 for (i
= 0; i
< count
; i
++) {
498 rid_e
= &pqueue
->rid_queue
[pqueue
->deq_head
];
499 user_req
= (struct cpt_request_info
*)(rid_e
->rid
);
501 if (likely((i
+1) < count
))
502 rte_prefetch_non_temporal((void *)rid_e
[1].rid
);
504 ret
= check_nb_command_id(user_req
, instance
);
506 if (unlikely(ret
== ERR_REQ_PENDING
)) {
507 /* Stop checking for completions */
511 /* Return completion code and op handle */
513 ops
[i
] = user_req
->op
;
515 CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
516 user_req
, user_req
->op
, ret
);
518 MOD_INC(pqueue
->deq_head
, DEFAULT_CMD_QLEN
);
519 pqueue
->pending_count
-= 1;
524 for (i
= 0; i
< nb_completed
; i
++) {
526 rsp
= (void *)ops
[i
];
528 if (likely((i
+ 1) < nb_completed
))
529 rte_prefetch0(ops
[i
+1]);
531 metabuf
= (void *)rsp
[0];
532 cop
= (void *)rsp
[1];
536 /* Check completion code */
538 if (likely(cc
[i
] == 0)) {
539 /* H/w success pkt. Post process */
540 otx_cpt_dequeue_post_process(cop
, rsp
);
541 } else if (cc
[i
] == ERR_GC_ICV_MISCOMPARE
) {
542 /* auth data mismatch */
543 cop
->status
= RTE_CRYPTO_OP_STATUS_AUTH_FAILED
;
546 cop
->status
= RTE_CRYPTO_OP_STATUS_ERROR
;
549 if (unlikely(cop
->sess_type
== RTE_CRYPTO_OP_SESSIONLESS
)) {
550 void *sess_private_data_t
=
551 get_sym_session_private_data(cop
->sym
->session
,
552 otx_cryptodev_driver_id
);
553 memset(sess_private_data_t
, 0,
554 cpt_get_session_size());
555 memset(cop
->sym
->session
, 0,
556 rte_cryptodev_sym_get_existing_header_session_size(
558 rte_mempool_put(instance
->sess_mp_priv
,
559 sess_private_data_t
);
560 rte_mempool_put(instance
->sess_mp
, cop
->sym
->session
);
561 cop
->sym
->session
= NULL
;
563 free_op_meta(metabuf
, instance
->meta_info
.pool
);
569 static struct rte_cryptodev_ops cptvf_ops
= {
570 /* Device related operations */
571 .dev_configure
= otx_cpt_dev_config
,
572 .dev_start
= otx_cpt_dev_start
,
573 .dev_stop
= otx_cpt_dev_stop
,
574 .dev_close
= otx_cpt_dev_close
,
575 .dev_infos_get
= otx_cpt_dev_info_get
,
577 .stats_get
= otx_cpt_stats_get
,
578 .stats_reset
= otx_cpt_stats_reset
,
579 .queue_pair_setup
= otx_cpt_que_pair_setup
,
580 .queue_pair_release
= otx_cpt_que_pair_release
,
581 .queue_pair_count
= NULL
,
583 /* Crypto related operations */
584 .sym_session_get_size
= otx_cpt_get_session_size
,
585 .sym_session_configure
= otx_cpt_session_cfg
,
586 .sym_session_clear
= otx_cpt_session_clear
590 otx_cpt_dev_create(struct rte_cryptodev
*c_dev
)
592 struct rte_pci_device
*pdev
= RTE_DEV_TO_PCI(c_dev
->device
);
593 struct cpt_vf
*cptvf
= NULL
;
598 if (pdev
->mem_resource
[0].phys_addr
== 0ULL)
	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
604 if (rte_eal_process_type() != RTE_PROC_PRIMARY
)
607 cptvf
= rte_zmalloc_socket("otx_cryptodev_private_mem",
608 sizeof(struct cpt_vf
), RTE_CACHE_LINE_SIZE
,
612 CPT_LOG_ERR("Cannot allocate memory for device private data");
616 snprintf(dev_name
, 32, "%02x:%02x.%x",
617 pdev
->addr
.bus
, pdev
->addr
.devid
, pdev
->addr
.function
);
619 reg_base
= pdev
->mem_resource
[0].addr
;
621 CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name
);
626 ret
= otx_cpt_hw_init(cptvf
, pdev
, reg_base
, dev_name
);
628 CPT_LOG_ERR("Failed to init cptvf %s", dev_name
);
633 /* Start off timer for mailbox interrupts */
634 otx_cpt_periodic_alarm_start(cptvf
);
636 c_dev
->dev_ops
= &cptvf_ops
;
638 c_dev
->enqueue_burst
= otx_cpt_pkt_enqueue
;
639 c_dev
->dequeue_burst
= otx_cpt_pkt_dequeue
;
641 c_dev
->feature_flags
= RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO
|
642 RTE_CRYPTODEV_FF_HW_ACCELERATED
|
643 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING
|
644 RTE_CRYPTODEV_FF_IN_PLACE_SGL
|
645 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT
|
646 RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT
;
648 /* Save dev private data */
649 c_dev
->data
->dev_private
= cptvf
;
655 /* Free private data allocated */