/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 */
8 #include <rte_common.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
17 #include "rte_eventdev.h"
18 #include "rte_eventdev_pmd.h"
19 #include "rte_event_crypto_adapter.h"
/* Size of the batches moved between the event device and cryptodevs.
 * NOTE(review): BATCH_SIZE is referenced throughout this file but its
 * definition was missing from this chunk; 32 matches the upstream value —
 * confirm against the original file.
 */
#define BATCH_SIZE 32
/* Default cap on ops processed per service invocation (conf->max_nb) */
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
/* Bounded retry count when the event device back-pressures enqueues */
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
30 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
32 struct rte_event_crypto_adapter
{
33 /* Event device identifier */
35 /* Event port identifier */
36 uint8_t event_port_id
;
37 /* Store event device's implicit release capability */
38 uint8_t implicit_release_disabled
;
39 /* Max crypto ops processed in any service function invocation */
41 /* Lock to serialize config updates with service function */
43 /* Next crypto device to be processed */
44 uint16_t next_cdev_id
;
45 /* Per crypto device structure */
46 struct crypto_device_info
*cdevs
;
47 /* Loop counter to flush crypto ops */
48 uint16_t transmit_loop_count
;
49 /* Per instance stats structure */
50 struct rte_event_crypto_adapter_stats crypto_stats
;
51 /* Configuration callback for rte_service configuration */
52 rte_event_crypto_adapter_conf_cb conf_cb
;
53 /* Configuration callback argument */
55 /* Set if default_cb is being used */
57 /* Service initialization state */
58 uint8_t service_inited
;
59 /* Memory allocation name */
60 char mem_name
[CRYPTO_ADAPTER_MEM_NAME_LEN
];
61 /* Socket identifier cached from eventdev */
63 /* Per adapter EAL service */
65 /* No. of queue pairs configured */
68 enum rte_event_crypto_adapter_mode mode
;
69 } __rte_cache_aligned
;
/* Per crypto device information. Field declarations reconstructed from
 * the surviving comments — confirm against the original file.
 */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;
/* Per queue pair information. Field declarations reconstructed from the
 * surviving comments — confirm against the original file.
 */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
	/* No of crypto ops accumulated */
	uint8_t len;
} __rte_cache_aligned;
/* Array of adapter instances, indexed by adapter id */
static struct rte_event_crypto_adapter **event_crypto_adapter;
/* Macros to check for valid adapter */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)
112 eca_valid_id(uint8_t id
)
114 return id
< RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE
;
120 const char *name
= "crypto_adapter_array";
121 const struct rte_memzone
*mz
;
124 sz
= sizeof(*event_crypto_adapter
) *
125 RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE
;
126 sz
= RTE_ALIGN(sz
, RTE_CACHE_LINE_SIZE
);
128 mz
= rte_memzone_lookup(name
);
130 mz
= rte_memzone_reserve_aligned(name
, sz
, rte_socket_id(), 0,
131 RTE_CACHE_LINE_SIZE
);
133 RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
139 event_crypto_adapter
= mz
->addr
;
143 static inline struct rte_event_crypto_adapter
*
144 eca_id_to_adapter(uint8_t id
)
146 return event_crypto_adapter
?
147 event_crypto_adapter
[id
] : NULL
;
151 eca_default_config_cb(uint8_t id
, uint8_t dev_id
,
152 struct rte_event_crypto_adapter_conf
*conf
, void *arg
)
154 struct rte_event_dev_config dev_conf
;
155 struct rte_eventdev
*dev
;
159 struct rte_event_port_conf
*port_conf
= arg
;
160 struct rte_event_crypto_adapter
*adapter
= eca_id_to_adapter(id
);
162 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
163 dev_conf
= dev
->data
->dev_conf
;
165 started
= dev
->data
->dev_started
;
167 rte_event_dev_stop(dev_id
);
168 port_id
= dev_conf
.nb_event_ports
;
169 dev_conf
.nb_event_ports
+= 1;
170 ret
= rte_event_dev_configure(dev_id
, &dev_conf
);
172 RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id
);
174 if (rte_event_dev_start(dev_id
))
180 ret
= rte_event_port_setup(dev_id
, port_id
, port_conf
);
182 RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id
);
186 conf
->event_port_id
= port_id
;
187 conf
->max_nb
= DEFAULT_MAX_NB
;
189 ret
= rte_event_dev_start(dev_id
);
191 adapter
->default_cb_arg
= 1;
195 int __rte_experimental
196 rte_event_crypto_adapter_create_ext(uint8_t id
, uint8_t dev_id
,
197 rte_event_crypto_adapter_conf_cb conf_cb
,
198 enum rte_event_crypto_adapter_mode mode
,
201 struct rte_event_crypto_adapter
*adapter
;
202 char mem_name
[CRYPTO_ADAPTER_NAME_LEN
];
203 struct rte_event_dev_info dev_info
;
208 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
209 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id
, -EINVAL
);
213 if (event_crypto_adapter
== NULL
) {
219 adapter
= eca_id_to_adapter(id
);
220 if (adapter
!= NULL
) {
221 RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id
);
225 socket_id
= rte_event_dev_socket_id(dev_id
);
226 snprintf(mem_name
, CRYPTO_ADAPTER_MEM_NAME_LEN
,
227 "rte_event_crypto_adapter_%d", id
);
229 adapter
= rte_zmalloc_socket(mem_name
, sizeof(*adapter
),
230 RTE_CACHE_LINE_SIZE
, socket_id
);
231 if (adapter
== NULL
) {
232 RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
236 ret
= rte_event_dev_info_get(dev_id
, &dev_info
);
238 RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
239 dev_id
, dev_info
.driver_name
);
243 adapter
->implicit_release_disabled
= (dev_info
.event_dev_cap
&
244 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE
);
245 adapter
->eventdev_id
= dev_id
;
246 adapter
->socket_id
= socket_id
;
247 adapter
->conf_cb
= conf_cb
;
248 adapter
->conf_arg
= conf_arg
;
249 adapter
->mode
= mode
;
250 strcpy(adapter
->mem_name
, mem_name
);
251 adapter
->cdevs
= rte_zmalloc_socket(adapter
->mem_name
,
252 rte_cryptodev_count() *
253 sizeof(struct crypto_device_info
), 0,
255 if (adapter
->cdevs
== NULL
) {
256 RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
261 rte_spinlock_init(&adapter
->lock
);
262 for (i
= 0; i
< rte_cryptodev_count(); i
++)
263 adapter
->cdevs
[i
].dev
= rte_cryptodev_pmd_get_dev(i
);
265 event_crypto_adapter
[id
] = adapter
;
271 int __rte_experimental
272 rte_event_crypto_adapter_create(uint8_t id
, uint8_t dev_id
,
273 struct rte_event_port_conf
*port_config
,
274 enum rte_event_crypto_adapter_mode mode
)
276 struct rte_event_port_conf
*pc
;
279 if (port_config
== NULL
)
281 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
283 pc
= rte_malloc(NULL
, sizeof(*pc
), 0);
287 ret
= rte_event_crypto_adapter_create_ext(id
, dev_id
,
288 eca_default_config_cb
,
297 int __rte_experimental
298 rte_event_crypto_adapter_free(uint8_t id
)
300 struct rte_event_crypto_adapter
*adapter
;
302 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
304 adapter
= eca_id_to_adapter(id
);
308 if (adapter
->nb_qps
) {
309 RTE_EDEV_LOG_ERR("%" PRIu16
"Queue pairs not deleted",
314 if (adapter
->default_cb_arg
)
315 rte_free(adapter
->conf_arg
);
316 rte_free(adapter
->cdevs
);
318 event_crypto_adapter
[id
] = NULL
;
323 static inline unsigned int
324 eca_enq_to_cryptodev(struct rte_event_crypto_adapter
*adapter
,
325 struct rte_event
*ev
, unsigned int cnt
)
327 struct rte_event_crypto_adapter_stats
*stats
= &adapter
->crypto_stats
;
328 union rte_event_crypto_metadata
*m_data
= NULL
;
329 struct crypto_queue_pair_info
*qp_info
= NULL
;
330 struct rte_crypto_op
*crypto_op
;
332 uint16_t qp_id
, len
, ret
;
338 stats
->event_deq_count
+= cnt
;
340 for (i
= 0; i
< cnt
; i
++) {
341 crypto_op
= ev
[i
].event_ptr
;
342 if (crypto_op
== NULL
)
344 if (crypto_op
->sess_type
== RTE_CRYPTO_OP_WITH_SESSION
) {
345 m_data
= rte_cryptodev_sym_session_get_user_data(
346 crypto_op
->sym
->session
);
347 if (m_data
== NULL
) {
348 rte_pktmbuf_free(crypto_op
->sym
->m_src
);
349 rte_crypto_op_free(crypto_op
);
353 cdev_id
= m_data
->request_info
.cdev_id
;
354 qp_id
= m_data
->request_info
.queue_pair_id
;
355 qp_info
= &adapter
->cdevs
[cdev_id
].qpairs
[qp_id
];
356 if (qp_info
== NULL
) {
357 rte_pktmbuf_free(crypto_op
->sym
->m_src
);
358 rte_crypto_op_free(crypto_op
);
362 qp_info
->op_buffer
[len
] = crypto_op
;
364 } else if (crypto_op
->sess_type
== RTE_CRYPTO_OP_SESSIONLESS
&&
365 crypto_op
->private_data_offset
) {
366 m_data
= (union rte_event_crypto_metadata
*)
367 ((uint8_t *)crypto_op
+
368 crypto_op
->private_data_offset
);
369 cdev_id
= m_data
->request_info
.cdev_id
;
370 qp_id
= m_data
->request_info
.queue_pair_id
;
371 qp_info
= &adapter
->cdevs
[cdev_id
].qpairs
[qp_id
];
372 if (qp_info
== NULL
) {
373 rte_pktmbuf_free(crypto_op
->sym
->m_src
);
374 rte_crypto_op_free(crypto_op
);
378 qp_info
->op_buffer
[len
] = crypto_op
;
381 rte_pktmbuf_free(crypto_op
->sym
->m_src
);
382 rte_crypto_op_free(crypto_op
);
386 if (len
== BATCH_SIZE
) {
387 struct rte_crypto_op
**op_buffer
= qp_info
->op_buffer
;
388 ret
= rte_cryptodev_enqueue_burst(cdev_id
,
393 stats
->crypto_enq_count
+= ret
;
396 struct rte_crypto_op
*op
;
397 op
= op_buffer
[ret
++];
398 stats
->crypto_enq_fail
++;
399 rte_pktmbuf_free(op
->sym
->m_src
);
400 rte_crypto_op_free(op
);
415 eca_crypto_enq_flush(struct rte_event_crypto_adapter
*adapter
)
417 struct rte_event_crypto_adapter_stats
*stats
= &adapter
->crypto_stats
;
418 struct crypto_device_info
*curr_dev
;
419 struct crypto_queue_pair_info
*curr_queue
;
420 struct rte_crypto_op
**op_buffer
;
421 struct rte_cryptodev
*dev
;
425 uint16_t num_cdev
= rte_cryptodev_count();
428 for (cdev_id
= 0; cdev_id
< num_cdev
; cdev_id
++) {
429 curr_dev
= &adapter
->cdevs
[cdev_id
];
430 if (curr_dev
== NULL
)
434 for (qp
= 0; qp
< dev
->data
->nb_queue_pairs
; qp
++) {
436 curr_queue
= &curr_dev
->qpairs
[qp
];
437 if (!curr_queue
->qp_enabled
)
440 op_buffer
= curr_queue
->op_buffer
;
441 ret
= rte_cryptodev_enqueue_burst(cdev_id
,
445 stats
->crypto_enq_count
+= ret
;
447 while (ret
< curr_queue
->len
) {
448 struct rte_crypto_op
*op
;
449 op
= op_buffer
[ret
++];
450 stats
->crypto_enq_fail
++;
451 rte_pktmbuf_free(op
->sym
->m_src
);
452 rte_crypto_op_free(op
);
462 eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter
*adapter
,
463 unsigned int max_enq
)
465 struct rte_event_crypto_adapter_stats
*stats
= &adapter
->crypto_stats
;
466 struct rte_event ev
[BATCH_SIZE
];
467 unsigned int nb_enq
, nb_enqueued
;
469 uint8_t event_dev_id
= adapter
->eventdev_id
;
470 uint8_t event_port_id
= adapter
->event_port_id
;
473 if (adapter
->mode
== RTE_EVENT_CRYPTO_ADAPTER_OP_NEW
)
476 for (nb_enq
= 0; nb_enq
< max_enq
; nb_enq
+= n
) {
477 stats
->event_poll_count
++;
478 n
= rte_event_dequeue_burst(event_dev_id
,
479 event_port_id
, ev
, BATCH_SIZE
, 0);
484 nb_enqueued
+= eca_enq_to_cryptodev(adapter
, ev
, n
);
487 if ((++adapter
->transmit_loop_count
&
488 (CRYPTO_ENQ_FLUSH_THRESHOLD
- 1)) == 0) {
489 nb_enqueued
+= eca_crypto_enq_flush(adapter
);
496 eca_ops_enqueue_burst(struct rte_event_crypto_adapter
*adapter
,
497 struct rte_crypto_op
**ops
, uint16_t num
)
499 struct rte_event_crypto_adapter_stats
*stats
= &adapter
->crypto_stats
;
500 union rte_event_crypto_metadata
*m_data
= NULL
;
501 uint8_t event_dev_id
= adapter
->eventdev_id
;
502 uint8_t event_port_id
= adapter
->event_port_id
;
503 struct rte_event events
[BATCH_SIZE
];
504 uint16_t nb_enqueued
, nb_ev
;
511 num
= RTE_MIN(num
, BATCH_SIZE
);
512 for (i
= 0; i
< num
; i
++) {
513 struct rte_event
*ev
= &events
[nb_ev
++];
514 if (ops
[i
]->sess_type
== RTE_CRYPTO_OP_WITH_SESSION
) {
515 m_data
= rte_cryptodev_sym_session_get_user_data(
516 ops
[i
]->sym
->session
);
517 } else if (ops
[i
]->sess_type
== RTE_CRYPTO_OP_SESSIONLESS
&&
518 ops
[i
]->private_data_offset
) {
519 m_data
= (union rte_event_crypto_metadata
*)
521 ops
[i
]->private_data_offset
);
524 if (unlikely(m_data
== NULL
)) {
525 rte_pktmbuf_free(ops
[i
]->sym
->m_src
);
526 rte_crypto_op_free(ops
[i
]);
530 rte_memcpy(ev
, &m_data
->response_info
, sizeof(*ev
));
531 ev
->event_ptr
= ops
[i
];
532 ev
->event_type
= RTE_EVENT_TYPE_CRYPTODEV
;
533 if (adapter
->implicit_release_disabled
)
534 ev
->op
= RTE_EVENT_OP_FORWARD
;
536 ev
->op
= RTE_EVENT_OP_NEW
;
540 nb_enqueued
+= rte_event_enqueue_burst(event_dev_id
,
542 &events
[nb_enqueued
],
543 nb_ev
- nb_enqueued
);
544 } while (retry
++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES
&&
545 nb_enqueued
< nb_ev
);
547 /* Free mbufs and rte_crypto_ops for failed events */
548 for (i
= nb_enqueued
; i
< nb_ev
; i
++) {
549 struct rte_crypto_op
*op
= events
[i
].event_ptr
;
550 rte_pktmbuf_free(op
->sym
->m_src
);
551 rte_crypto_op_free(op
);
554 stats
->event_enq_fail_count
+= nb_ev
- nb_enqueued
;
555 stats
->event_enq_count
+= nb_enqueued
;
556 stats
->event_enq_retry_count
+= retry
- 1;
559 static inline unsigned int
560 eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter
*adapter
,
561 unsigned int max_deq
)
563 struct rte_event_crypto_adapter_stats
*stats
= &adapter
->crypto_stats
;
564 struct crypto_device_info
*curr_dev
;
565 struct crypto_queue_pair_info
*curr_queue
;
566 struct rte_crypto_op
*ops
[BATCH_SIZE
];
568 struct rte_cryptodev
*dev
;
570 uint16_t qp
, dev_qps
;
572 uint16_t num_cdev
= rte_cryptodev_count();
579 for (cdev_id
= adapter
->next_cdev_id
;
580 cdev_id
< num_cdev
; cdev_id
++) {
581 curr_dev
= &adapter
->cdevs
[cdev_id
];
582 if (curr_dev
== NULL
)
585 dev_qps
= dev
->data
->nb_queue_pairs
;
587 for (qp
= curr_dev
->next_queue_pair_id
;
588 queues
< dev_qps
; qp
= (qp
+ 1) % dev_qps
,
591 curr_queue
= &curr_dev
->qpairs
[qp
];
592 if (!curr_queue
->qp_enabled
)
595 n
= rte_cryptodev_dequeue_burst(cdev_id
, qp
,
601 stats
->crypto_deq_count
+= n
;
602 eca_ops_enqueue_burst(adapter
, ops
, n
);
605 if (nb_deq
> max_deq
) {
606 if ((qp
+ 1) == dev_qps
) {
607 adapter
->next_cdev_id
=
611 curr_dev
->next_queue_pair_id
= (qp
+ 1)
612 % dev
->data
->nb_queue_pairs
;
618 } while (done
== false);
/* One service iteration: alternate dequeue and enqueue work until the
 * max_ops budget is spent or neither direction makes progress.
 */
static void
eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
			unsigned int max_ops)
{
	while (max_ops) {
		unsigned int e_cnt, d_cnt;

		e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, e_cnt);

		d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, d_cnt);

		/* Stop early when idle in both directions */
		if (e_cnt == 0 && d_cnt == 0)
			break;
	}
}
642 eca_service_func(void *args
)
644 struct rte_event_crypto_adapter
*adapter
= args
;
646 if (rte_spinlock_trylock(&adapter
->lock
) == 0)
648 eca_crypto_adapter_run(adapter
, adapter
->max_nb
);
649 rte_spinlock_unlock(&adapter
->lock
);
655 eca_init_service(struct rte_event_crypto_adapter
*adapter
, uint8_t id
)
657 struct rte_event_crypto_adapter_conf adapter_conf
;
658 struct rte_service_spec service
;
661 if (adapter
->service_inited
)
664 memset(&service
, 0, sizeof(service
));
665 snprintf(service
.name
, CRYPTO_ADAPTER_NAME_LEN
,
666 "rte_event_crypto_adapter_%d", id
);
667 service
.socket_id
= adapter
->socket_id
;
668 service
.callback
= eca_service_func
;
669 service
.callback_userdata
= adapter
;
670 /* Service function handles locking for queue add/del updates */
671 service
.capabilities
= RTE_SERVICE_CAP_MT_SAFE
;
672 ret
= rte_service_component_register(&service
, &adapter
->service_id
);
674 RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32
,
679 ret
= adapter
->conf_cb(id
, adapter
->eventdev_id
,
680 &adapter_conf
, adapter
->conf_arg
);
682 RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32
,
687 adapter
->max_nb
= adapter_conf
.max_nb
;
688 adapter
->event_port_id
= adapter_conf
.event_port_id
;
689 adapter
->service_inited
= 1;
695 eca_update_qp_info(struct rte_event_crypto_adapter
*adapter
,
696 struct crypto_device_info
*dev_info
,
697 int32_t queue_pair_id
,
700 struct crypto_queue_pair_info
*qp_info
;
704 if (dev_info
->qpairs
== NULL
)
707 if (queue_pair_id
== -1) {
708 for (i
= 0; i
< dev_info
->dev
->data
->nb_queue_pairs
; i
++)
709 eca_update_qp_info(adapter
, dev_info
, i
, add
);
711 qp_info
= &dev_info
->qpairs
[queue_pair_id
];
712 enabled
= qp_info
->qp_enabled
;
714 adapter
->nb_qps
+= !enabled
;
715 dev_info
->num_qpairs
+= !enabled
;
717 adapter
->nb_qps
-= enabled
;
718 dev_info
->num_qpairs
-= enabled
;
720 qp_info
->qp_enabled
= !!add
;
725 eca_add_queue_pair(struct rte_event_crypto_adapter
*adapter
,
729 struct crypto_device_info
*dev_info
= &adapter
->cdevs
[cdev_id
];
730 struct crypto_queue_pair_info
*qpairs
;
733 if (dev_info
->qpairs
== NULL
) {
735 rte_zmalloc_socket(adapter
->mem_name
,
736 dev_info
->dev
->data
->nb_queue_pairs
*
737 sizeof(struct crypto_queue_pair_info
),
738 0, adapter
->socket_id
);
739 if (dev_info
->qpairs
== NULL
)
742 qpairs
= dev_info
->qpairs
;
743 qpairs
->op_buffer
= rte_zmalloc_socket(adapter
->mem_name
,
745 sizeof(struct rte_crypto_op
*),
746 0, adapter
->socket_id
);
747 if (!qpairs
->op_buffer
) {
753 if (queue_pair_id
== -1) {
754 for (i
= 0; i
< dev_info
->dev
->data
->nb_queue_pairs
; i
++)
755 eca_update_qp_info(adapter
, dev_info
, i
, 1);
757 eca_update_qp_info(adapter
, dev_info
,
758 (uint16_t)queue_pair_id
, 1);
763 int __rte_experimental
764 rte_event_crypto_adapter_queue_pair_add(uint8_t id
,
766 int32_t queue_pair_id
,
767 const struct rte_event
*event
)
769 struct rte_event_crypto_adapter
*adapter
;
770 struct rte_eventdev
*dev
;
771 struct crypto_device_info
*dev_info
;
775 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
777 if (!rte_cryptodev_pmd_is_valid_dev(cdev_id
)) {
778 RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8
, cdev_id
);
782 adapter
= eca_id_to_adapter(id
);
786 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
787 ret
= rte_event_crypto_adapter_caps_get(adapter
->eventdev_id
,
791 RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
792 " cdev %" PRIu8
, id
, cdev_id
);
796 if ((cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
) &&
798 RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
803 dev_info
= &adapter
->cdevs
[cdev_id
];
805 if (queue_pair_id
!= -1 &&
806 (uint16_t)queue_pair_id
>= dev_info
->dev
->data
->nb_queue_pairs
) {
807 RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16
,
808 (uint16_t)queue_pair_id
);
812 /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
813 * no need of service core as HW supports event forward capability.
815 if ((cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
) ||
816 (cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
&&
817 adapter
->mode
== RTE_EVENT_CRYPTO_ADAPTER_OP_NEW
) ||
818 (cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
&&
819 adapter
->mode
== RTE_EVENT_CRYPTO_ADAPTER_OP_NEW
)) {
820 RTE_FUNC_PTR_OR_ERR_RET(
821 *dev
->dev_ops
->crypto_adapter_queue_pair_add
,
823 if (dev_info
->qpairs
== NULL
) {
825 rte_zmalloc_socket(adapter
->mem_name
,
826 dev_info
->dev
->data
->nb_queue_pairs
*
827 sizeof(struct crypto_queue_pair_info
),
828 0, adapter
->socket_id
);
829 if (dev_info
->qpairs
== NULL
)
833 ret
= (*dev
->dev_ops
->crypto_adapter_queue_pair_add
)(dev
,
841 eca_update_qp_info(adapter
, &adapter
->cdevs
[cdev_id
],
845 /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
846 * or SW adapter, initiate services so the application can choose
847 * which ever way it wants to use the adapter.
848 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
849 * Application may wants to use one of below two mode
850 * a. OP_FORWARD mode -> HW Dequeue + SW enqueue
851 * b. OP_NEW mode -> HW Dequeue
852 * Case 2: No HW caps, use SW adapter
853 * a. OP_FORWARD mode -> SW enqueue & dequeue
854 * b. OP_NEW mode -> SW Dequeue
856 if ((cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
&&
857 adapter
->mode
== RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD
) ||
858 (!(cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
) &&
859 !(cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
) &&
860 !(cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
) &&
861 (cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA
))) {
862 rte_spinlock_lock(&adapter
->lock
);
863 ret
= eca_init_service(adapter
, id
);
865 ret
= eca_add_queue_pair(adapter
, cdev_id
,
867 rte_spinlock_unlock(&adapter
->lock
);
872 rte_service_component_runstate_set(adapter
->service_id
, 1);
878 int __rte_experimental
879 rte_event_crypto_adapter_queue_pair_del(uint8_t id
, uint8_t cdev_id
,
880 int32_t queue_pair_id
)
882 struct rte_event_crypto_adapter
*adapter
;
883 struct crypto_device_info
*dev_info
;
884 struct rte_eventdev
*dev
;
889 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
891 if (!rte_cryptodev_pmd_is_valid_dev(cdev_id
)) {
892 RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8
, cdev_id
);
896 adapter
= eca_id_to_adapter(id
);
900 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
901 ret
= rte_event_crypto_adapter_caps_get(adapter
->eventdev_id
,
907 dev_info
= &adapter
->cdevs
[cdev_id
];
909 if (queue_pair_id
!= -1 &&
910 (uint16_t)queue_pair_id
>= dev_info
->dev
->data
->nb_queue_pairs
) {
911 RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16
,
912 (uint16_t)queue_pair_id
);
916 if ((cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
) ||
917 (cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
&&
918 adapter
->mode
== RTE_EVENT_CRYPTO_ADAPTER_OP_NEW
)) {
919 RTE_FUNC_PTR_OR_ERR_RET(
920 *dev
->dev_ops
->crypto_adapter_queue_pair_del
,
922 ret
= (*dev
->dev_ops
->crypto_adapter_queue_pair_del
)(dev
,
926 eca_update_qp_info(adapter
,
927 &adapter
->cdevs
[cdev_id
],
930 if (dev_info
->num_qpairs
== 0) {
931 rte_free(dev_info
->qpairs
);
932 dev_info
->qpairs
= NULL
;
936 if (adapter
->nb_qps
== 0)
939 rte_spinlock_lock(&adapter
->lock
);
940 if (queue_pair_id
== -1) {
941 for (i
= 0; i
< dev_info
->dev
->data
->nb_queue_pairs
;
943 eca_update_qp_info(adapter
, dev_info
,
946 eca_update_qp_info(adapter
, dev_info
,
947 (uint16_t)queue_pair_id
, 0);
950 if (dev_info
->num_qpairs
== 0) {
951 rte_free(dev_info
->qpairs
);
952 dev_info
->qpairs
= NULL
;
955 rte_spinlock_unlock(&adapter
->lock
);
956 rte_service_component_runstate_set(adapter
->service_id
,
964 eca_adapter_ctrl(uint8_t id
, int start
)
966 struct rte_event_crypto_adapter
*adapter
;
967 struct crypto_device_info
*dev_info
;
968 struct rte_eventdev
*dev
;
974 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
975 adapter
= eca_id_to_adapter(id
);
979 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
981 for (i
= 0; i
< rte_cryptodev_count(); i
++) {
982 dev_info
= &adapter
->cdevs
[i
];
983 /* if start check for num queue pairs */
984 if (start
&& !dev_info
->num_qpairs
)
986 /* if stop check if dev has been started */
987 if (stop
&& !dev_info
->dev_started
)
989 use_service
|= !dev_info
->internal_event_port
;
990 dev_info
->dev_started
= start
;
991 if (dev_info
->internal_event_port
== 0)
993 start
? (*dev
->dev_ops
->crypto_adapter_start
)(dev
,
995 (*dev
->dev_ops
->crypto_adapter_stop
)(dev
,
1000 rte_service_runstate_set(adapter
->service_id
, start
);
1005 int __rte_experimental
1006 rte_event_crypto_adapter_start(uint8_t id
)
1008 struct rte_event_crypto_adapter
*adapter
;
1010 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
1011 adapter
= eca_id_to_adapter(id
);
1012 if (adapter
== NULL
)
1015 return eca_adapter_ctrl(id
, 1);
1018 int __rte_experimental
1019 rte_event_crypto_adapter_stop(uint8_t id
)
1021 return eca_adapter_ctrl(id
, 0);
1024 int __rte_experimental
1025 rte_event_crypto_adapter_stats_get(uint8_t id
,
1026 struct rte_event_crypto_adapter_stats
*stats
)
1028 struct rte_event_crypto_adapter
*adapter
;
1029 struct rte_event_crypto_adapter_stats dev_stats_sum
= { 0 };
1030 struct rte_event_crypto_adapter_stats dev_stats
;
1031 struct rte_eventdev
*dev
;
1032 struct crypto_device_info
*dev_info
;
1036 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
1038 adapter
= eca_id_to_adapter(id
);
1039 if (adapter
== NULL
|| stats
== NULL
)
1042 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
1043 memset(stats
, 0, sizeof(*stats
));
1044 for (i
= 0; i
< rte_cryptodev_count(); i
++) {
1045 dev_info
= &adapter
->cdevs
[i
];
1046 if (dev_info
->internal_event_port
== 0 ||
1047 dev
->dev_ops
->crypto_adapter_stats_get
== NULL
)
1049 ret
= (*dev
->dev_ops
->crypto_adapter_stats_get
)(dev
,
1055 dev_stats_sum
.crypto_deq_count
+= dev_stats
.crypto_deq_count
;
1056 dev_stats_sum
.event_enq_count
+=
1057 dev_stats
.event_enq_count
;
1060 if (adapter
->service_inited
)
1061 *stats
= adapter
->crypto_stats
;
1063 stats
->crypto_deq_count
+= dev_stats_sum
.crypto_deq_count
;
1064 stats
->event_enq_count
+= dev_stats_sum
.event_enq_count
;
1069 int __rte_experimental
1070 rte_event_crypto_adapter_stats_reset(uint8_t id
)
1072 struct rte_event_crypto_adapter
*adapter
;
1073 struct crypto_device_info
*dev_info
;
1074 struct rte_eventdev
*dev
;
1077 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
1079 adapter
= eca_id_to_adapter(id
);
1080 if (adapter
== NULL
)
1083 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
1084 for (i
= 0; i
< rte_cryptodev_count(); i
++) {
1085 dev_info
= &adapter
->cdevs
[i
];
1086 if (dev_info
->internal_event_port
== 0 ||
1087 dev
->dev_ops
->crypto_adapter_stats_reset
== NULL
)
1089 (*dev
->dev_ops
->crypto_adapter_stats_reset
)(dev
,
1093 memset(&adapter
->crypto_stats
, 0, sizeof(adapter
->crypto_stats
));
1097 int __rte_experimental
1098 rte_event_crypto_adapter_service_id_get(uint8_t id
, uint32_t *service_id
)
1100 struct rte_event_crypto_adapter
*adapter
;
1102 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
1104 adapter
= eca_id_to_adapter(id
);
1105 if (adapter
== NULL
|| service_id
== NULL
)
1108 if (adapter
->service_inited
)
1109 *service_id
= adapter
->service_id
;
1111 return adapter
->service_inited
? 0 : -ESRCH
;
1114 int __rte_experimental
1115 rte_event_crypto_adapter_event_port_get(uint8_t id
, uint8_t *event_port_id
)
1117 struct rte_event_crypto_adapter
*adapter
;
1119 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
1121 adapter
= eca_id_to_adapter(id
);
1122 if (adapter
== NULL
|| event_port_id
== NULL
)
1125 *event_port_id
= adapter
->event_port_id
;