1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Intel Corporation.
8 #include <rte_common.h>
10 #include <rte_errno.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
14 #include <rte_malloc.h>
15 #include <rte_service_component.h>
17 #include "rte_eventdev.h"
18 #include "rte_eventdev_pmd.h"
19 #include "rte_event_crypto_adapter.h"
22 #define DEFAULT_MAX_NB 128
23 #define CRYPTO_ADAPTER_NAME_LEN 32
24 #define CRYPTO_ADAPTER_MEM_NAME_LEN 32
25 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
27 /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
28 * iterations of eca_crypto_adapter_enq_run()
30 #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
/*
 * Per-instance state of an event crypto adapter.
 *
 * NOTE(review): this source is a corrupted extraction — original line
 * numbers are embedded at the start of each line and the embedded
 * numbering jumps (33->35, 39->41, 53->55, 61->63, ...), so several
 * member declarations that the comments below refer to are missing
 * from this view. Other code in this file dereferences eventdev_id,
 * max_nb, lock, conf_arg, default_cb_arg, socket_id, service_id and
 * nb_qps on this struct, so those are presumably the missing members —
 * TODO confirm against the upstream file. Code left byte-identical;
 * only comments added.
 */
32 struct rte_event_crypto_adapter
{
33 /* Event device identifier */
/* NOTE(review): member declaration missing here (extraction gap). */
35 /* Event port identifier */
36 uint8_t event_port_id
;
37 /* Store event device's implicit release capability */
38 uint8_t implicit_release_disabled
;
39 /* Max crypto ops processed in any service function invocation */
/* NOTE(review): member declaration missing here (extraction gap). */
41 /* Lock to serialize config updates with service function */
/* NOTE(review): member declaration missing here (extraction gap). */
43 /* Next crypto device to be processed */
44 uint16_t next_cdev_id
;
45 /* Per crypto device structure */
46 struct crypto_device_info
*cdevs
;
47 /* Loop counter to flush crypto ops */
48 uint16_t transmit_loop_count
;
49 /* Per instance stats structure */
50 struct rte_event_crypto_adapter_stats crypto_stats
;
51 /* Configuration callback for rte_service configuration */
52 rte_event_crypto_adapter_conf_cb conf_cb
;
53 /* Configuration callback argument */
/* NOTE(review): member declaration missing here (extraction gap). */
55 /* Set if default_cb is being used */
/* NOTE(review): member declaration missing here (extraction gap). */
57 /* Service initialization state */
58 uint8_t service_inited
;
59 /* Memory allocation name */
60 char mem_name
[CRYPTO_ADAPTER_MEM_NAME_LEN
];
61 /* Socket identifier cached from eventdev */
/* NOTE(review): member declaration missing here (extraction gap). */
63 /* Per adapter EAL service */
/* NOTE(review): member declaration missing here (extraction gap). */
65 /* No. of queue pairs configured */
/* NOTE(review): member declaration missing here (extraction gap). */
68 enum rte_event_crypto_adapter_mode mode
;
69 } __rte_cache_aligned
;
71 /* Per crypto device information */
/*
 * Per-cryptodev bookkeeping for one adapter instance.
 *
 * NOTE(review): corrupted extraction — original line numbers are
 * embedded in the content and the numbering jumps (80->82, 83->85,
 * 86->89), so the closing `*` `/` of two comments and at least two
 * member declarations are missing from this view. Other code in this
 * file reads dev_started and num_qpairs on this struct, so those are
 * presumably the missing members — TODO confirm against upstream.
 * Code left byte-identical; only comments added.
 */
72 struct crypto_device_info
{
73 /* Pointer to cryptodev */
74 struct rte_cryptodev
*dev
;
75 /* Pointer to queue pair info */
76 struct crypto_queue_pair_info
*qpairs
;
77 /* Next queue pair to be processed */
78 uint16_t next_queue_pair_id
;
79 /* Set to indicate cryptodev->eventdev packet
80 * transfer uses a hardware mechanism
82 uint8_t internal_event_port
;
83 /* Set to indicate processing has been started */
85 /* If num_qpairs > 0, the start callback will
86 * be invoked if not already invoked
89 } __rte_cache_aligned
;
91 /* Per queue pair information */
/*
 * Per-queue-pair state: enable flag and an rte_crypto_op batching
 * buffer with its fill count.
 *
 * NOTE(review): corrupted extraction — embedded numbering jumps
 * (93->95, 97->99) show two member declarations are missing from this
 * view; other code in this file reads qp_enabled and len on this
 * struct, so those are presumably the missing members — TODO confirm
 * against upstream. Code left byte-identical; only comments added.
 */
92 struct crypto_queue_pair_info
{
93 /* Set to indicate queue pair is enabled */
95 /* Pointer to hold rte_crypto_ops for batching */
96 struct rte_crypto_op
**op_buffer
;
97 /* No of crypto ops accumulated */
99 } __rte_cache_aligned
;
/*
 * Global table of adapter instance pointers, indexed by adapter id.
 * NULL until first use; later in this file it is set from a memzone
 * ("crypto_adapter_array") looked up or reserved on demand.
 */
101 static struct rte_event_crypto_adapter
**event_crypto_adapter
;
103 /* Macros to check for valid adapter */
104 #define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
105 if (!eca_valid_id(id)) { \
106 RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
/*
 * eca_valid_id() — range-check an adapter id against
 * RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE.
 * NOTE(review): corrupted extraction — the return-type line and the
 * function braces are missing from this view; code left byte-identical,
 * comments only added.
 */
112 eca_valid_id(uint8_t id
)
114 return id
< RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE
;
120 const char *name
= "crypto_adapter_array";
121 const struct rte_memzone
*mz
;
124 sz
= sizeof(*event_crypto_adapter
) *
125 RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE
;
126 sz
= RTE_ALIGN(sz
, RTE_CACHE_LINE_SIZE
);
128 mz
= rte_memzone_lookup(name
);
130 mz
= rte_memzone_reserve_aligned(name
, sz
, rte_socket_id(), 0,
131 RTE_CACHE_LINE_SIZE
);
133 RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
139 event_crypto_adapter
= mz
->addr
;
/*
 * eca_id_to_adapter() — map an adapter id to its instance pointer.
 * Returns NULL when the global adapter table has not been allocated
 * yet (no bounds check on id here; callers validate id first).
 * NOTE(review): corrupted extraction — the function braces are missing
 * from this view; code left byte-identical, comments only added.
 */
143 static inline struct rte_event_crypto_adapter
*
144 eca_id_to_adapter(uint8_t id
)
146 return event_crypto_adapter
?
147 event_crypto_adapter
[id
] : NULL
;
151 eca_default_config_cb(uint8_t id
, uint8_t dev_id
,
152 struct rte_event_crypto_adapter_conf
*conf
, void *arg
)
154 struct rte_event_dev_config dev_conf
;
155 struct rte_eventdev
*dev
;
159 struct rte_event_port_conf
*port_conf
= arg
;
160 struct rte_event_crypto_adapter
*adapter
= eca_id_to_adapter(id
);
165 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
166 dev_conf
= dev
->data
->dev_conf
;
168 started
= dev
->data
->dev_started
;
170 rte_event_dev_stop(dev_id
);
171 port_id
= dev_conf
.nb_event_ports
;
172 dev_conf
.nb_event_ports
+= 1;
173 ret
= rte_event_dev_configure(dev_id
, &dev_conf
);
175 RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id
);
177 if (rte_event_dev_start(dev_id
))
183 ret
= rte_event_port_setup(dev_id
, port_id
, port_conf
);
185 RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id
);
189 conf
->event_port_id
= port_id
;
190 conf
->max_nb
= DEFAULT_MAX_NB
;
192 ret
= rte_event_dev_start(dev_id
);
194 adapter
->default_cb_arg
= 1;
199 rte_event_crypto_adapter_create_ext(uint8_t id
, uint8_t dev_id
,
200 rte_event_crypto_adapter_conf_cb conf_cb
,
201 enum rte_event_crypto_adapter_mode mode
,
204 struct rte_event_crypto_adapter
*adapter
;
205 char mem_name
[CRYPTO_ADAPTER_NAME_LEN
];
206 struct rte_event_dev_info dev_info
;
211 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
212 RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id
, -EINVAL
);
216 if (event_crypto_adapter
== NULL
) {
222 adapter
= eca_id_to_adapter(id
);
223 if (adapter
!= NULL
) {
224 RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id
);
228 socket_id
= rte_event_dev_socket_id(dev_id
);
229 snprintf(mem_name
, CRYPTO_ADAPTER_MEM_NAME_LEN
,
230 "rte_event_crypto_adapter_%d", id
);
232 adapter
= rte_zmalloc_socket(mem_name
, sizeof(*adapter
),
233 RTE_CACHE_LINE_SIZE
, socket_id
);
234 if (adapter
== NULL
) {
235 RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
239 ret
= rte_event_dev_info_get(dev_id
, &dev_info
);
241 RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
242 dev_id
, dev_info
.driver_name
);
246 adapter
->implicit_release_disabled
= (dev_info
.event_dev_cap
&
247 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE
);
248 adapter
->eventdev_id
= dev_id
;
249 adapter
->socket_id
= socket_id
;
250 adapter
->conf_cb
= conf_cb
;
251 adapter
->conf_arg
= conf_arg
;
252 adapter
->mode
= mode
;
253 strcpy(adapter
->mem_name
, mem_name
);
254 adapter
->cdevs
= rte_zmalloc_socket(adapter
->mem_name
,
255 rte_cryptodev_count() *
256 sizeof(struct crypto_device_info
), 0,
258 if (adapter
->cdevs
== NULL
) {
259 RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
264 rte_spinlock_init(&adapter
->lock
);
265 for (i
= 0; i
< rte_cryptodev_count(); i
++)
266 adapter
->cdevs
[i
].dev
= rte_cryptodev_pmd_get_dev(i
);
268 event_crypto_adapter
[id
] = adapter
;
275 rte_event_crypto_adapter_create(uint8_t id
, uint8_t dev_id
,
276 struct rte_event_port_conf
*port_config
,
277 enum rte_event_crypto_adapter_mode mode
)
279 struct rte_event_port_conf
*pc
;
282 if (port_config
== NULL
)
284 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
286 pc
= rte_malloc(NULL
, sizeof(*pc
), 0);
290 ret
= rte_event_crypto_adapter_create_ext(id
, dev_id
,
291 eca_default_config_cb
,
/*
 * rte_event_crypto_adapter_free() — public API: release an adapter
 * instance. Logs an error while queue pairs are still configured,
 * frees conf_arg only when it was allocated by the default config
 * path (default_cb_arg set), frees the per-cryptodev array, and clears
 * the global table slot.
 * NOTE(review): corrupted extraction — embedded numbering jumps
 * (307->311, 312->317, 319->321) show missing lines: the NULL-adapter
 * check, the error return for the qps-in-use branch, and presumably
 * the rte_free() of the adapter itself plus the final return — TODO
 * confirm against upstream. Code left byte-identical; comments only.
 */
301 rte_event_crypto_adapter_free(uint8_t id
)
303 struct rte_event_crypto_adapter
*adapter
;
305 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
307 adapter
= eca_id_to_adapter(id
);
311 if (adapter
->nb_qps
) {
312 RTE_EDEV_LOG_ERR("%" PRIu16
"Queue pairs not deleted",
317 if (adapter
->default_cb_arg
)
318 rte_free(adapter
->conf_arg
);
319 rte_free(adapter
->cdevs
);
321 event_crypto_adapter
[id
] = NULL
;
326 static inline unsigned int
327 eca_enq_to_cryptodev(struct rte_event_crypto_adapter
*adapter
,
328 struct rte_event
*ev
, unsigned int cnt
)
330 struct rte_event_crypto_adapter_stats
*stats
= &adapter
->crypto_stats
;
331 union rte_event_crypto_metadata
*m_data
= NULL
;
332 struct crypto_queue_pair_info
*qp_info
= NULL
;
333 struct rte_crypto_op
*crypto_op
;
335 uint16_t qp_id
, len
, ret
;
341 stats
->event_deq_count
+= cnt
;
343 for (i
= 0; i
< cnt
; i
++) {
344 crypto_op
= ev
[i
].event_ptr
;
345 if (crypto_op
== NULL
)
347 if (crypto_op
->sess_type
== RTE_CRYPTO_OP_WITH_SESSION
) {
348 m_data
= rte_cryptodev_sym_session_get_user_data(
349 crypto_op
->sym
->session
);
350 if (m_data
== NULL
) {
351 rte_pktmbuf_free(crypto_op
->sym
->m_src
);
352 rte_crypto_op_free(crypto_op
);
356 cdev_id
= m_data
->request_info
.cdev_id
;
357 qp_id
= m_data
->request_info
.queue_pair_id
;
358 qp_info
= &adapter
->cdevs
[cdev_id
].qpairs
[qp_id
];
359 if (!qp_info
->qp_enabled
) {
360 rte_pktmbuf_free(crypto_op
->sym
->m_src
);
361 rte_crypto_op_free(crypto_op
);
365 qp_info
->op_buffer
[len
] = crypto_op
;
367 } else if (crypto_op
->sess_type
== RTE_CRYPTO_OP_SESSIONLESS
&&
368 crypto_op
->private_data_offset
) {
369 m_data
= (union rte_event_crypto_metadata
*)
370 ((uint8_t *)crypto_op
+
371 crypto_op
->private_data_offset
);
372 cdev_id
= m_data
->request_info
.cdev_id
;
373 qp_id
= m_data
->request_info
.queue_pair_id
;
374 qp_info
= &adapter
->cdevs
[cdev_id
].qpairs
[qp_id
];
375 if (!qp_info
->qp_enabled
) {
376 rte_pktmbuf_free(crypto_op
->sym
->m_src
);
377 rte_crypto_op_free(crypto_op
);
381 qp_info
->op_buffer
[len
] = crypto_op
;
384 rte_pktmbuf_free(crypto_op
->sym
->m_src
);
385 rte_crypto_op_free(crypto_op
);
389 if (len
== BATCH_SIZE
) {
390 struct rte_crypto_op
**op_buffer
= qp_info
->op_buffer
;
391 ret
= rte_cryptodev_enqueue_burst(cdev_id
,
396 stats
->crypto_enq_count
+= ret
;
399 struct rte_crypto_op
*op
;
400 op
= op_buffer
[ret
++];
401 stats
->crypto_enq_fail
++;
402 rte_pktmbuf_free(op
->sym
->m_src
);
403 rte_crypto_op_free(op
);
418 eca_crypto_enq_flush(struct rte_event_crypto_adapter
*adapter
)
420 struct rte_event_crypto_adapter_stats
*stats
= &adapter
->crypto_stats
;
421 struct crypto_device_info
*curr_dev
;
422 struct crypto_queue_pair_info
*curr_queue
;
423 struct rte_crypto_op
**op_buffer
;
424 struct rte_cryptodev
*dev
;
428 uint16_t num_cdev
= rte_cryptodev_count();
431 for (cdev_id
= 0; cdev_id
< num_cdev
; cdev_id
++) {
432 curr_dev
= &adapter
->cdevs
[cdev_id
];
436 for (qp
= 0; qp
< dev
->data
->nb_queue_pairs
; qp
++) {
438 curr_queue
= &curr_dev
->qpairs
[qp
];
439 if (!curr_queue
->qp_enabled
)
442 op_buffer
= curr_queue
->op_buffer
;
443 ret
= rte_cryptodev_enqueue_burst(cdev_id
,
447 stats
->crypto_enq_count
+= ret
;
449 while (ret
< curr_queue
->len
) {
450 struct rte_crypto_op
*op
;
451 op
= op_buffer
[ret
++];
452 stats
->crypto_enq_fail
++;
453 rte_pktmbuf_free(op
->sym
->m_src
);
454 rte_crypto_op_free(op
);
464 eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter
*adapter
,
465 unsigned int max_enq
)
467 struct rte_event_crypto_adapter_stats
*stats
= &adapter
->crypto_stats
;
468 struct rte_event ev
[BATCH_SIZE
];
469 unsigned int nb_enq
, nb_enqueued
;
471 uint8_t event_dev_id
= adapter
->eventdev_id
;
472 uint8_t event_port_id
= adapter
->event_port_id
;
475 if (adapter
->mode
== RTE_EVENT_CRYPTO_ADAPTER_OP_NEW
)
478 for (nb_enq
= 0; nb_enq
< max_enq
; nb_enq
+= n
) {
479 stats
->event_poll_count
++;
480 n
= rte_event_dequeue_burst(event_dev_id
,
481 event_port_id
, ev
, BATCH_SIZE
, 0);
486 nb_enqueued
+= eca_enq_to_cryptodev(adapter
, ev
, n
);
489 if ((++adapter
->transmit_loop_count
&
490 (CRYPTO_ENQ_FLUSH_THRESHOLD
- 1)) == 0) {
491 nb_enqueued
+= eca_crypto_enq_flush(adapter
);
498 eca_ops_enqueue_burst(struct rte_event_crypto_adapter
*adapter
,
499 struct rte_crypto_op
**ops
, uint16_t num
)
501 struct rte_event_crypto_adapter_stats
*stats
= &adapter
->crypto_stats
;
502 union rte_event_crypto_metadata
*m_data
= NULL
;
503 uint8_t event_dev_id
= adapter
->eventdev_id
;
504 uint8_t event_port_id
= adapter
->event_port_id
;
505 struct rte_event events
[BATCH_SIZE
];
506 uint16_t nb_enqueued
, nb_ev
;
513 num
= RTE_MIN(num
, BATCH_SIZE
);
514 for (i
= 0; i
< num
; i
++) {
515 struct rte_event
*ev
= &events
[nb_ev
++];
516 if (ops
[i
]->sess_type
== RTE_CRYPTO_OP_WITH_SESSION
) {
517 m_data
= rte_cryptodev_sym_session_get_user_data(
518 ops
[i
]->sym
->session
);
519 } else if (ops
[i
]->sess_type
== RTE_CRYPTO_OP_SESSIONLESS
&&
520 ops
[i
]->private_data_offset
) {
521 m_data
= (union rte_event_crypto_metadata
*)
523 ops
[i
]->private_data_offset
);
526 if (unlikely(m_data
== NULL
)) {
527 rte_pktmbuf_free(ops
[i
]->sym
->m_src
);
528 rte_crypto_op_free(ops
[i
]);
532 rte_memcpy(ev
, &m_data
->response_info
, sizeof(*ev
));
533 ev
->event_ptr
= ops
[i
];
534 ev
->event_type
= RTE_EVENT_TYPE_CRYPTODEV
;
535 if (adapter
->implicit_release_disabled
)
536 ev
->op
= RTE_EVENT_OP_FORWARD
;
538 ev
->op
= RTE_EVENT_OP_NEW
;
542 nb_enqueued
+= rte_event_enqueue_burst(event_dev_id
,
544 &events
[nb_enqueued
],
545 nb_ev
- nb_enqueued
);
546 } while (retry
++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES
&&
547 nb_enqueued
< nb_ev
);
549 /* Free mbufs and rte_crypto_ops for failed events */
550 for (i
= nb_enqueued
; i
< nb_ev
; i
++) {
551 struct rte_crypto_op
*op
= events
[i
].event_ptr
;
552 rte_pktmbuf_free(op
->sym
->m_src
);
553 rte_crypto_op_free(op
);
556 stats
->event_enq_fail_count
+= nb_ev
- nb_enqueued
;
557 stats
->event_enq_count
+= nb_enqueued
;
558 stats
->event_enq_retry_count
+= retry
- 1;
561 static inline unsigned int
562 eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter
*adapter
,
563 unsigned int max_deq
)
565 struct rte_event_crypto_adapter_stats
*stats
= &adapter
->crypto_stats
;
566 struct crypto_device_info
*curr_dev
;
567 struct crypto_queue_pair_info
*curr_queue
;
568 struct rte_crypto_op
*ops
[BATCH_SIZE
];
570 struct rte_cryptodev
*dev
;
572 uint16_t qp
, dev_qps
;
574 uint16_t num_cdev
= rte_cryptodev_count();
581 for (cdev_id
= adapter
->next_cdev_id
;
582 cdev_id
< num_cdev
; cdev_id
++) {
583 curr_dev
= &adapter
->cdevs
[cdev_id
];
587 dev_qps
= dev
->data
->nb_queue_pairs
;
589 for (qp
= curr_dev
->next_queue_pair_id
;
590 queues
< dev_qps
; qp
= (qp
+ 1) % dev_qps
,
593 curr_queue
= &curr_dev
->qpairs
[qp
];
594 if (!curr_queue
->qp_enabled
)
597 n
= rte_cryptodev_dequeue_burst(cdev_id
, qp
,
603 stats
->crypto_deq_count
+= n
;
604 eca_ops_enqueue_burst(adapter
, ops
, n
);
607 if (nb_deq
> max_deq
) {
608 if ((qp
+ 1) == dev_qps
) {
609 adapter
->next_cdev_id
=
613 curr_dev
->next_queue_pair_id
= (qp
+ 1)
614 % dev
->data
->nb_queue_pairs
;
620 } while (done
== false);
/*
 * eca_crypto_adapter_run() — one processing pass for the service
 * function: first drain completed crypto ops back toward the eventdev
 * (deq_run), then spend the remaining budget moving new events to the
 * cryptodevs (enq_run). max_ops is decremented by the work actually
 * done on each side, clamped with RTE_MIN so it cannot underflow.
 * NOTE(review): corrupted extraction — the return-type line, braces
 * and the tail of the function (what happens when both counts are
 * zero) are missing from this view; code left byte-identical.
 */
625 eca_crypto_adapter_run(struct rte_event_crypto_adapter
*adapter
,
626 unsigned int max_ops
)
629 unsigned int e_cnt
, d_cnt
;
631 e_cnt
= eca_crypto_adapter_deq_run(adapter
, max_ops
);
632 max_ops
-= RTE_MIN(max_ops
, e_cnt
);
634 d_cnt
= eca_crypto_adapter_enq_run(adapter
, max_ops
);
635 max_ops
-= RTE_MIN(max_ops
, d_cnt
);
637 if (e_cnt
== 0 && d_cnt
== 0)
/*
 * eca_service_func() — per-adapter EAL service callback. Tries the
 * adapter spinlock without blocking (skipping this iteration if a
 * config update holds it), runs one processing pass bounded by
 * adapter->max_nb, then releases the lock.
 * NOTE(review): corrupted extraction — the return-type line, braces
 * and the return statements are missing from this view; code left
 * byte-identical, comments only added.
 */
644 eca_service_func(void *args
)
646 struct rte_event_crypto_adapter
*adapter
= args
;
648 if (rte_spinlock_trylock(&adapter
->lock
) == 0)
650 eca_crypto_adapter_run(adapter
, adapter
->max_nb
);
651 rte_spinlock_unlock(&adapter
->lock
);
657 eca_init_service(struct rte_event_crypto_adapter
*adapter
, uint8_t id
)
659 struct rte_event_crypto_adapter_conf adapter_conf
;
660 struct rte_service_spec service
;
663 if (adapter
->service_inited
)
666 memset(&service
, 0, sizeof(service
));
667 snprintf(service
.name
, CRYPTO_ADAPTER_NAME_LEN
,
668 "rte_event_crypto_adapter_%d", id
);
669 service
.socket_id
= adapter
->socket_id
;
670 service
.callback
= eca_service_func
;
671 service
.callback_userdata
= adapter
;
672 /* Service function handles locking for queue add/del updates */
673 service
.capabilities
= RTE_SERVICE_CAP_MT_SAFE
;
674 ret
= rte_service_component_register(&service
, &adapter
->service_id
);
676 RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32
,
681 ret
= adapter
->conf_cb(id
, adapter
->eventdev_id
,
682 &adapter_conf
, adapter
->conf_arg
);
684 RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32
,
689 adapter
->max_nb
= adapter_conf
.max_nb
;
690 adapter
->event_port_id
= adapter_conf
.event_port_id
;
691 adapter
->service_inited
= 1;
697 eca_update_qp_info(struct rte_event_crypto_adapter
*adapter
,
698 struct crypto_device_info
*dev_info
,
699 int32_t queue_pair_id
,
702 struct crypto_queue_pair_info
*qp_info
;
706 if (dev_info
->qpairs
== NULL
)
709 if (queue_pair_id
== -1) {
710 for (i
= 0; i
< dev_info
->dev
->data
->nb_queue_pairs
; i
++)
711 eca_update_qp_info(adapter
, dev_info
, i
, add
);
713 qp_info
= &dev_info
->qpairs
[queue_pair_id
];
714 enabled
= qp_info
->qp_enabled
;
716 adapter
->nb_qps
+= !enabled
;
717 dev_info
->num_qpairs
+= !enabled
;
719 adapter
->nb_qps
-= enabled
;
720 dev_info
->num_qpairs
-= enabled
;
722 qp_info
->qp_enabled
= !!add
;
727 eca_add_queue_pair(struct rte_event_crypto_adapter
*adapter
,
731 struct crypto_device_info
*dev_info
= &adapter
->cdevs
[cdev_id
];
732 struct crypto_queue_pair_info
*qpairs
;
735 if (dev_info
->qpairs
== NULL
) {
737 rte_zmalloc_socket(adapter
->mem_name
,
738 dev_info
->dev
->data
->nb_queue_pairs
*
739 sizeof(struct crypto_queue_pair_info
),
740 0, adapter
->socket_id
);
741 if (dev_info
->qpairs
== NULL
)
744 qpairs
= dev_info
->qpairs
;
745 qpairs
->op_buffer
= rte_zmalloc_socket(adapter
->mem_name
,
747 sizeof(struct rte_crypto_op
*),
748 0, adapter
->socket_id
);
749 if (!qpairs
->op_buffer
) {
755 if (queue_pair_id
== -1) {
756 for (i
= 0; i
< dev_info
->dev
->data
->nb_queue_pairs
; i
++)
757 eca_update_qp_info(adapter
, dev_info
, i
, 1);
759 eca_update_qp_info(adapter
, dev_info
,
760 (uint16_t)queue_pair_id
, 1);
766 rte_event_crypto_adapter_queue_pair_add(uint8_t id
,
768 int32_t queue_pair_id
,
769 const struct rte_event
*event
)
771 struct rte_event_crypto_adapter
*adapter
;
772 struct rte_eventdev
*dev
;
773 struct crypto_device_info
*dev_info
;
777 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
779 if (!rte_cryptodev_pmd_is_valid_dev(cdev_id
)) {
780 RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8
, cdev_id
);
784 adapter
= eca_id_to_adapter(id
);
788 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
789 ret
= rte_event_crypto_adapter_caps_get(adapter
->eventdev_id
,
793 RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
794 " cdev %" PRIu8
, id
, cdev_id
);
798 if ((cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
) &&
800 RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
805 dev_info
= &adapter
->cdevs
[cdev_id
];
807 if (queue_pair_id
!= -1 &&
808 (uint16_t)queue_pair_id
>= dev_info
->dev
->data
->nb_queue_pairs
) {
809 RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16
,
810 (uint16_t)queue_pair_id
);
814 /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
815 * no need of service core as HW supports event forward capability.
817 if ((cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
) ||
818 (cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
&&
819 adapter
->mode
== RTE_EVENT_CRYPTO_ADAPTER_OP_NEW
) ||
820 (cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
&&
821 adapter
->mode
== RTE_EVENT_CRYPTO_ADAPTER_OP_NEW
)) {
822 RTE_FUNC_PTR_OR_ERR_RET(
823 *dev
->dev_ops
->crypto_adapter_queue_pair_add
,
825 if (dev_info
->qpairs
== NULL
) {
827 rte_zmalloc_socket(adapter
->mem_name
,
828 dev_info
->dev
->data
->nb_queue_pairs
*
829 sizeof(struct crypto_queue_pair_info
),
830 0, adapter
->socket_id
);
831 if (dev_info
->qpairs
== NULL
)
835 ret
= (*dev
->dev_ops
->crypto_adapter_queue_pair_add
)(dev
,
843 eca_update_qp_info(adapter
, &adapter
->cdevs
[cdev_id
],
847 /* In case HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
848 * or SW adapter, initiate services so the application can choose
849 * which ever way it wants to use the adapter.
850 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
851 * Application may wants to use one of below two mode
852 * a. OP_FORWARD mode -> HW Dequeue + SW enqueue
853 * b. OP_NEW mode -> HW Dequeue
854 * Case 2: No HW caps, use SW adapter
855 * a. OP_FORWARD mode -> SW enqueue & dequeue
856 * b. OP_NEW mode -> SW Dequeue
858 if ((cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
&&
859 adapter
->mode
== RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD
) ||
860 (!(cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
) &&
861 !(cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
) &&
862 !(cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
) &&
863 (cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA
))) {
864 rte_spinlock_lock(&adapter
->lock
);
865 ret
= eca_init_service(adapter
, id
);
867 ret
= eca_add_queue_pair(adapter
, cdev_id
,
869 rte_spinlock_unlock(&adapter
->lock
);
874 rte_service_component_runstate_set(adapter
->service_id
, 1);
881 rte_event_crypto_adapter_queue_pair_del(uint8_t id
, uint8_t cdev_id
,
882 int32_t queue_pair_id
)
884 struct rte_event_crypto_adapter
*adapter
;
885 struct crypto_device_info
*dev_info
;
886 struct rte_eventdev
*dev
;
891 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
893 if (!rte_cryptodev_pmd_is_valid_dev(cdev_id
)) {
894 RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8
, cdev_id
);
898 adapter
= eca_id_to_adapter(id
);
902 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
903 ret
= rte_event_crypto_adapter_caps_get(adapter
->eventdev_id
,
909 dev_info
= &adapter
->cdevs
[cdev_id
];
911 if (queue_pair_id
!= -1 &&
912 (uint16_t)queue_pair_id
>= dev_info
->dev
->data
->nb_queue_pairs
) {
913 RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16
,
914 (uint16_t)queue_pair_id
);
918 if ((cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
) ||
919 (cap
& RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
&&
920 adapter
->mode
== RTE_EVENT_CRYPTO_ADAPTER_OP_NEW
)) {
921 RTE_FUNC_PTR_OR_ERR_RET(
922 *dev
->dev_ops
->crypto_adapter_queue_pair_del
,
924 ret
= (*dev
->dev_ops
->crypto_adapter_queue_pair_del
)(dev
,
928 eca_update_qp_info(adapter
,
929 &adapter
->cdevs
[cdev_id
],
932 if (dev_info
->num_qpairs
== 0) {
933 rte_free(dev_info
->qpairs
);
934 dev_info
->qpairs
= NULL
;
938 if (adapter
->nb_qps
== 0)
941 rte_spinlock_lock(&adapter
->lock
);
942 if (queue_pair_id
== -1) {
943 for (i
= 0; i
< dev_info
->dev
->data
->nb_queue_pairs
;
945 eca_update_qp_info(adapter
, dev_info
,
948 eca_update_qp_info(adapter
, dev_info
,
949 (uint16_t)queue_pair_id
, 0);
952 if (dev_info
->num_qpairs
== 0) {
953 rte_free(dev_info
->qpairs
);
954 dev_info
->qpairs
= NULL
;
957 rte_spinlock_unlock(&adapter
->lock
);
958 rte_service_component_runstate_set(adapter
->service_id
,
966 eca_adapter_ctrl(uint8_t id
, int start
)
968 struct rte_event_crypto_adapter
*adapter
;
969 struct crypto_device_info
*dev_info
;
970 struct rte_eventdev
*dev
;
976 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
977 adapter
= eca_id_to_adapter(id
);
981 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
983 for (i
= 0; i
< rte_cryptodev_count(); i
++) {
984 dev_info
= &adapter
->cdevs
[i
];
985 /* if start check for num queue pairs */
986 if (start
&& !dev_info
->num_qpairs
)
988 /* if stop check if dev has been started */
989 if (stop
&& !dev_info
->dev_started
)
991 use_service
|= !dev_info
->internal_event_port
;
992 dev_info
->dev_started
= start
;
993 if (dev_info
->internal_event_port
== 0)
995 start
? (*dev
->dev_ops
->crypto_adapter_start
)(dev
,
997 (*dev
->dev_ops
->crypto_adapter_stop
)(dev
,
1002 rte_service_runstate_set(adapter
->service_id
, start
);
/*
 * rte_event_crypto_adapter_start() — public API: start adapter `id`
 * after validating it, by delegating to eca_adapter_ctrl(id, 1).
 * NOTE(review): corrupted extraction — braces and the error return of
 * the NULL-adapter branch are missing from this view; code left
 * byte-identical, comments only added.
 */
1008 rte_event_crypto_adapter_start(uint8_t id
)
1010 struct rte_event_crypto_adapter
*adapter
;
1012 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
1013 adapter
= eca_id_to_adapter(id
);
1014 if (adapter
== NULL
)
1017 return eca_adapter_ctrl(id
, 1);
/*
 * rte_event_crypto_adapter_stop() — public API: stop adapter `id` by
 * delegating to eca_adapter_ctrl(id, 0).
 * NOTE(review): corrupted extraction — the return-type line and braces
 * are missing from this view; code left byte-identical.
 */
1021 rte_event_crypto_adapter_stop(uint8_t id
)
1023 return eca_adapter_ctrl(id
, 0);
1027 rte_event_crypto_adapter_stats_get(uint8_t id
,
1028 struct rte_event_crypto_adapter_stats
*stats
)
1030 struct rte_event_crypto_adapter
*adapter
;
1031 struct rte_event_crypto_adapter_stats dev_stats_sum
= { 0 };
1032 struct rte_event_crypto_adapter_stats dev_stats
;
1033 struct rte_eventdev
*dev
;
1034 struct crypto_device_info
*dev_info
;
1038 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
1040 adapter
= eca_id_to_adapter(id
);
1041 if (adapter
== NULL
|| stats
== NULL
)
1044 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
1045 memset(stats
, 0, sizeof(*stats
));
1046 for (i
= 0; i
< rte_cryptodev_count(); i
++) {
1047 dev_info
= &adapter
->cdevs
[i
];
1048 if (dev_info
->internal_event_port
== 0 ||
1049 dev
->dev_ops
->crypto_adapter_stats_get
== NULL
)
1051 ret
= (*dev
->dev_ops
->crypto_adapter_stats_get
)(dev
,
1057 dev_stats_sum
.crypto_deq_count
+= dev_stats
.crypto_deq_count
;
1058 dev_stats_sum
.event_enq_count
+=
1059 dev_stats
.event_enq_count
;
1062 if (adapter
->service_inited
)
1063 *stats
= adapter
->crypto_stats
;
1065 stats
->crypto_deq_count
+= dev_stats_sum
.crypto_deq_count
;
1066 stats
->event_enq_count
+= dev_stats_sum
.event_enq_count
;
1072 rte_event_crypto_adapter_stats_reset(uint8_t id
)
1074 struct rte_event_crypto_adapter
*adapter
;
1075 struct crypto_device_info
*dev_info
;
1076 struct rte_eventdev
*dev
;
1079 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
1081 adapter
= eca_id_to_adapter(id
);
1082 if (adapter
== NULL
)
1085 dev
= &rte_eventdevs
[adapter
->eventdev_id
];
1086 for (i
= 0; i
< rte_cryptodev_count(); i
++) {
1087 dev_info
= &adapter
->cdevs
[i
];
1088 if (dev_info
->internal_event_port
== 0 ||
1089 dev
->dev_ops
->crypto_adapter_stats_reset
== NULL
)
1091 (*dev
->dev_ops
->crypto_adapter_stats_reset
)(dev
,
1095 memset(&adapter
->crypto_stats
, 0, sizeof(adapter
->crypto_stats
));
/*
 * rte_event_crypto_adapter_service_id_get() — public API: report the
 * adapter's EAL service id via *service_id when the service has been
 * initialized; returns 0 on success, -ESRCH when no service exists.
 * NOTE(review): corrupted extraction — braces and the error return of
 * the NULL-argument branch are missing from this view; code left
 * byte-identical, comments only added.
 */
1100 rte_event_crypto_adapter_service_id_get(uint8_t id
, uint32_t *service_id
)
1102 struct rte_event_crypto_adapter
*adapter
;
1104 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
1106 adapter
= eca_id_to_adapter(id
);
1107 if (adapter
== NULL
|| service_id
== NULL
)
1110 if (adapter
->service_inited
)
1111 *service_id
= adapter
->service_id
;
1113 return adapter
->service_inited
? 0 : -ESRCH
;
/*
 * rte_event_crypto_adapter_event_port_get() — public API: report the
 * event port used by the adapter via *event_port_id.
 * NOTE(review): corrupted extraction — braces, the error return of the
 * NULL-argument branch, and the final return statement are missing
 * from this view; code left byte-identical, comments only added.
 */
1117 rte_event_crypto_adapter_event_port_get(uint8_t id
, uint8_t *event_port_id
)
1119 struct rte_event_crypto_adapter
*adapter
;
1121 EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id
, -EINVAL
);
1123 adapter
= eca_id_to_adapter(id
);
1124 if (adapter
== NULL
|| event_port_id
== NULL
)
1127 *event_port_id
= adapter
->event_port_id
;