/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_event_crypto_adapter.h"

#define BATCH_SIZE 32
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024

struct rte_event_crypto_adapter {
	/* Event device identifier */
	uint8_t eventdev_id;
	/* Event port identifier */
	uint8_t event_port_id;
	/* Store event device's implicit release capability */
	uint8_t implicit_release_disabled;
	/* Max crypto ops processed in any service function invocation */
	uint32_t max_nb;
	/* Lock to serialize config updates with service function */
	rte_spinlock_t lock;
	/* Next crypto device to be processed */
	uint16_t next_cdev_id;
	/* Per crypto device structure */
	struct crypto_device_info *cdevs;
	/* Loop counter to flush crypto ops */
	uint16_t transmit_loop_count;
	/* Per instance stats structure */
	struct rte_event_crypto_adapter_stats crypto_stats;
	/* Configuration callback for rte_service configuration */
	rte_event_crypto_adapter_conf_cb conf_cb;
	/* Configuration callback argument */
	void *conf_arg;
	/* Set if default_cb is being used */
	int default_cb_arg;
	/* Service initialization state */
	uint8_t service_inited;
	/* Memory allocation name */
	char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
	/* Socket identifier cached from eventdev */
	int socket_id;
	/* Per adapter EAL service */
	uint32_t service_id;
	/* No. of queue pairs configured */
	uint16_t nb_qps;
	/* Adapter mode */
	enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
	/* Pointer to cryptodev */
	struct rte_cryptodev *dev;
	/* Pointer to queue pair info */
	struct crypto_queue_pair_info *qpairs;
	/* Next queue pair to be processed */
	uint16_t next_queue_pair_id;
	/* Set to indicate cryptodev->eventdev packet
	 * transfer uses a hardware mechanism
	 */
	uint8_t internal_event_port;
	/* Set to indicate processing has been started */
	uint8_t dev_started;
	/* If num_qpairs > 0, the start callback will
	 * be invoked if not already invoked
	 */
	uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
	/* Set to indicate queue pair is enabled */
	bool qp_enabled;
	/* Pointer to hold rte_crypto_ops for batching */
	struct rte_crypto_op **op_buffer;
	/* No of crypto ops accumulated */
	uint8_t len;
} __rte_cache_aligned;

static struct rte_event_crypto_adapter **event_crypto_adapter;

/* Macro to check for a valid adapter id */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
	if (!eca_valid_id(id)) { \
		RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
		return retval; \
	} \
} while (0)

static inline int
eca_valid_id(uint8_t id)
{
	return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
}

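/* Reserve (or look up) the memzone backing the global array of adapter
 * instance pointers.
 */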
static int
eca_init(void)
{
	const char *name = "crypto_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_crypto_adapter) *
	    RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					 PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_crypto_adapter = mz->addr;
	return 0;
}

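/* Map an adapter id to its instance; returns NULL until the global array has
 * been initialized by eca_init().
 */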
static inline struct rte_event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
	return event_crypto_adapter ?
	    event_crypto_adapter[id] : NULL;
}

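/* Default configuration callback: reconfigure the event device with one extra
 * event port dedicated to the adapter, restarting the device if it was
 * already running.
 */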
static int
eca_default_config_cb(uint8_t id, uint8_t dev_id,
		      struct rte_event_crypto_adapter_conf *conf, void *arg)
{
	struct rte_event_dev_config dev_conf;
	struct rte_eventdev *dev;
	uint8_t port_id;
	int started;
	int ret;
	struct rte_event_port_conf *port_conf = arg;
	struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);

	dev = &rte_eventdevs[adapter->eventdev_id];
	dev_conf = dev->data->dev_conf;

	started = dev->data->dev_started;
	if (started)
		rte_event_dev_stop(dev_id);
	port_id = dev_conf.nb_event_ports;
	dev_conf.nb_event_ports += 1;
	ret = rte_event_dev_configure(dev_id, &dev_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
		if (started) {
			if (rte_event_dev_start(dev_id))
				return -EIO;
		}
		return ret;
	}

	ret = rte_event_port_setup(dev_id, port_id, port_conf);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
		return ret;
	}

	conf->event_port_id = port_id;
	conf->max_nb = DEFAULT_MAX_NB;
	if (started)
		ret = rte_event_dev_start(dev_id);

	adapter->default_cb_arg = 1;
	return ret;
}

int __rte_experimental
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
				    rte_event_crypto_adapter_conf_cb conf_cb,
				    enum rte_event_crypto_adapter_mode mode,
				    void *conf_arg)
{
	struct rte_event_crypto_adapter *adapter;
	char mem_name[CRYPTO_ADAPTER_NAME_LEN];
	struct rte_event_dev_info dev_info;
	int socket_id;
	uint8_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	if (conf_cb == NULL)
		return -EINVAL;

	if (event_crypto_adapter == NULL) {
		ret = eca_init();
		if (ret)
			return ret;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter != NULL) {
		RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
		return -EEXIST;
	}

	socket_id = rte_event_dev_socket_id(dev_id);
	snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);

	adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
				     RTE_CACHE_LINE_SIZE, socket_id);
	if (adapter == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
		return -ENOMEM;
	}

	ret = rte_event_dev_info_get(dev_id, &dev_info);
	if (ret < 0) {
		RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
				 dev_id, dev_info.driver_name);
		rte_free(adapter);
		return ret;
	}

	adapter->implicit_release_disabled = (dev_info.event_dev_cap &
			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
	adapter->eventdev_id = dev_id;
	adapter->socket_id = socket_id;
	adapter->conf_cb = conf_cb;
	adapter->conf_arg = conf_arg;
	adapter->mode = mode;
	strcpy(adapter->mem_name, mem_name);
	adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
					rte_cryptodev_count() *
					sizeof(struct crypto_device_info), 0,
					socket_id);
	if (adapter->cdevs == NULL) {
		RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
		rte_free(adapter);
		return -ENOMEM;
	}

	rte_spinlock_init(&adapter->lock);
	for (i = 0; i < rte_cryptodev_count(); i++)
		adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

	event_crypto_adapter[id] = adapter;

	return 0;
}


int __rte_experimental
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
				struct rte_event_port_conf *port_config,
				enum rte_event_crypto_adapter_mode mode)
{
	struct rte_event_port_conf *pc;
	int ret;

	if (port_config == NULL)
		return -EINVAL;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	pc = rte_malloc(NULL, sizeof(*pc), 0);
	if (pc == NULL)
		return -ENOMEM;
	*pc = *port_config;
	ret = rte_event_crypto_adapter_create_ext(id, dev_id,
						  eca_default_config_cb,
						  mode,
						  pc);
	if (ret)
		rte_free(pc);

	return ret;
}

int __rte_experimental
rte_event_crypto_adapter_free(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	if (adapter->nb_qps) {
		RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
				 adapter->nb_qps);
		return -EBUSY;
	}

	if (adapter->default_cb_arg)
		rte_free(adapter->conf_arg);
	rte_free(adapter->cdevs);
	rte_free(adapter);
	event_crypto_adapter[id] = NULL;

	return 0;
}

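/* Submit dequeued events to the crypto devices: ops are buffered per queue
 * pair (located via session user data or the op's private data area) and
 * flushed with rte_cryptodev_enqueue_burst() once BATCH_SIZE ops have
 * accumulated; ops that cannot be mapped or enqueued are freed.
 */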
static inline unsigned int
eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
		     struct rte_event *ev, unsigned int cnt)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	struct crypto_queue_pair_info *qp_info = NULL;
	struct rte_crypto_op *crypto_op;
	unsigned int i, n;
	uint16_t qp_id, len, ret;
	uint8_t cdev_id;

	len = 0;
	ret = 0;
	n = 0;
	stats->event_deq_count += cnt;

	for (i = 0; i < cnt; i++) {
		crypto_op = ev[i].event_ptr;
		if (crypto_op == NULL)
			continue;
		if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					crypto_op->sym->session);
			if (m_data == NULL) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}

			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (qp_info == NULL) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			len = qp_info->len;
			qp_info->op_buffer[len] = crypto_op;
			len++;
		} else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
			   crypto_op->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)crypto_op +
				  crypto_op->private_data_offset);
			cdev_id = m_data->request_info.cdev_id;
			qp_id = m_data->request_info.queue_pair_id;
			qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
			if (qp_info == NULL) {
				rte_pktmbuf_free(crypto_op->sym->m_src);
				rte_crypto_op_free(crypto_op);
				continue;
			}
			len = qp_info->len;
			qp_info->op_buffer[len] = crypto_op;
			len++;
		} else {
			rte_pktmbuf_free(crypto_op->sym->m_src);
			rte_crypto_op_free(crypto_op);
			continue;
		}

		if (len == BATCH_SIZE) {
			struct rte_crypto_op **op_buffer = qp_info->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
							  qp_id,
							  op_buffer,
							  BATCH_SIZE);

			stats->crypto_enq_count += ret;

			while (ret < len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
			}

			len = 0;
		}

		if (qp_info)
			qp_info->len = len;
		n += ret;
	}

	return n;
}

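/* Flush crypto ops still buffered on the enabled queue pairs of all crypto
 * devices, freeing any ops the cryptodev does not accept.
 */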
static unsigned int
eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op **op_buffer;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp;
	uint16_t ret;
	uint16_t num_cdev = rte_cryptodev_count();

	ret = 0;
	for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
		curr_dev = &adapter->cdevs[cdev_id];
		if (curr_dev == NULL)
			continue;
		dev = curr_dev->dev;

		for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

			curr_queue = &curr_dev->qpairs[qp];
			if (!curr_queue->qp_enabled)
				continue;

			op_buffer = curr_queue->op_buffer;
			ret = rte_cryptodev_enqueue_burst(cdev_id,
							  qp,
							  op_buffer,
							  curr_queue->len);
			stats->crypto_enq_count += ret;

			while (ret < curr_queue->len) {
				struct rte_crypto_op *op;
				op = op_buffer[ret++];
				stats->crypto_enq_fail++;
				rte_pktmbuf_free(op->sym->m_src);
				rte_crypto_op_free(op);
			}
			curr_queue->len = 0;
		}
	}

	return ret;
}

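/* Enqueue side of the service loop (OP_FORWARD mode only): dequeue up to
 * max_enq events from the adapter's event port, hand them to the crypto
 * devices, and flush buffered ops every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations.
 */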
static int
eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
			   unsigned int max_enq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct rte_event ev[BATCH_SIZE];
	unsigned int nb_enq, nb_enqueued;
	uint16_t n;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;

	nb_enqueued = 0;
	if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
		return 0;

	for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
		stats->event_poll_count++;
		n = rte_event_dequeue_burst(event_dev_id,
					    event_port_id, ev, BATCH_SIZE, 0);

		if (!n)
			break;

		nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
	}

	if ((++adapter->transmit_loop_count &
	    (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
		nb_enqueued += eca_crypto_enq_flush(adapter);
	}

	return nb_enqueued;
}

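/* Convert completed crypto ops back into events using the response metadata
 * stored with each op, enqueue them to the event device, and retry up to
 * CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES times before dropping the remainder.
 */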
static inline void
eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
		      struct rte_crypto_op **ops, uint16_t num)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	union rte_event_crypto_metadata *m_data = NULL;
	uint8_t event_dev_id = adapter->eventdev_id;
	uint8_t event_port_id = adapter->event_port_id;
	struct rte_event events[BATCH_SIZE];
	uint16_t nb_enqueued, nb_ev;
	uint8_t retry;
	uint8_t i;

	nb_ev = 0;
	retry = 0;
	nb_enqueued = 0;
	num = RTE_MIN(num, BATCH_SIZE);
	for (i = 0; i < num; i++) {
		struct rte_event *ev = &events[nb_ev++];
		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			m_data = rte_cryptodev_sym_session_get_user_data(
					ops[i]->sym->session);
		} else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
			   ops[i]->private_data_offset) {
			m_data = (union rte_event_crypto_metadata *)
				 ((uint8_t *)ops[i] +
				  ops[i]->private_data_offset);
		}

		if (unlikely(m_data == NULL)) {
			rte_pktmbuf_free(ops[i]->sym->m_src);
			rte_crypto_op_free(ops[i]);
			continue;
		}

		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
		ev->event_ptr = ops[i];
		ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
		if (adapter->implicit_release_disabled)
			ev->op = RTE_EVENT_OP_FORWARD;
		else
			ev->op = RTE_EVENT_OP_NEW;
	}

	do {
		nb_enqueued += rte_event_enqueue_burst(event_dev_id,
						       event_port_id,
						       &events[nb_enqueued],
						       nb_ev - nb_enqueued);
	} while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
		 nb_enqueued < nb_ev);

	/* Free mbufs and rte_crypto_ops for failed events */
	for (i = nb_enqueued; i < nb_ev; i++) {
		struct rte_crypto_op *op = events[i].event_ptr;
		rte_pktmbuf_free(op->sym->m_src);
		rte_crypto_op_free(op);
	}

	stats->event_enq_fail_count += nb_ev - nb_enqueued;
	stats->event_enq_count += nb_enqueued;
	stats->event_enq_retry_count += retry - 1;
}

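/* Dequeue side of the service loop: poll the enabled queue pairs of all
 * crypto devices round-robin, forward completed ops as events, and remember
 * the next device/queue pair to resume from once max_deq ops have been
 * processed.
 */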
static inline unsigned int
eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
			   unsigned int max_deq)
{
	struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
	struct crypto_device_info *curr_dev;
	struct crypto_queue_pair_info *curr_queue;
	struct rte_crypto_op *ops[BATCH_SIZE];
	uint16_t n, nb_deq;
	struct rte_cryptodev *dev;
	uint8_t cdev_id;
	uint16_t qp, dev_qps;
	bool done;
	uint16_t num_cdev = rte_cryptodev_count();

	nb_deq = 0;
	do {
		uint16_t queues = 0;
		done = true;

		for (cdev_id = adapter->next_cdev_id;
		     cdev_id < num_cdev; cdev_id++) {
			curr_dev = &adapter->cdevs[cdev_id];
			if (curr_dev == NULL)
				continue;
			dev = curr_dev->dev;
			dev_qps = dev->data->nb_queue_pairs;

			for (qp = curr_dev->next_queue_pair_id;
			     queues < dev_qps; qp = (qp + 1) % dev_qps,
			     queues++) {

				curr_queue = &curr_dev->qpairs[qp];
				if (!curr_queue->qp_enabled)
					continue;

				n = rte_cryptodev_dequeue_burst(cdev_id, qp,
					ops, BATCH_SIZE);
				if (!n)
					continue;

				done = false;
				stats->crypto_deq_count += n;
				eca_ops_enqueue_burst(adapter, ops, n);
				nb_deq += n;

				if (nb_deq > max_deq) {
					if ((qp + 1) == dev_qps) {
						adapter->next_cdev_id =
							(cdev_id + 1)
							% num_cdev;
					}
					curr_dev->next_queue_pair_id = (qp + 1)
						% dev->data->nb_queue_pairs;

					return nb_deq;
				}
			}
		}
	} while (done == false);
	return nb_deq;
}

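/* Run the dequeue and enqueue sides until max_ops ops have been processed or
 * neither side has work left.
 */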
static void
eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
		       unsigned int max_ops)
{
	while (max_ops) {
		unsigned int e_cnt, d_cnt;

		e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, e_cnt);

		d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
		max_ops -= RTE_MIN(max_ops, d_cnt);

		if (e_cnt == 0 && d_cnt == 0)
			break;
	}
}

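/* Service core entry point; the trylock keeps the service from racing with
 * queue pair add/delete updates that hold the adapter lock.
 */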
static int
eca_service_func(void *args)
{
	struct rte_event_crypto_adapter *adapter = args;

	if (rte_spinlock_trylock(&adapter->lock) == 0)
		return 0;
	eca_crypto_adapter_run(adapter, adapter->max_nb);
	rte_spinlock_unlock(&adapter->lock);

	return 0;
}

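/* Register the adapter's service function and run the configuration callback
 * to obtain the event port id and max_nb settings.
 */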
static int
eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
{
	struct rte_event_crypto_adapter_conf adapter_conf;
	struct rte_service_spec service;
	int ret;

	if (adapter->service_inited)
		return 0;

	memset(&service, 0, sizeof(service));
	snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
		 "rte_event_crypto_adapter_%d", id);
	service.socket_id = adapter->socket_id;
	service.callback = eca_service_func;
	service.callback_userdata = adapter;
	/* Service function handles locking for queue add/del updates */
	service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
	ret = rte_service_component_register(&service, &adapter->service_id);
	if (ret) {
		RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
				 service.name, ret);
		return ret;
	}

	ret = adapter->conf_cb(id, adapter->eventdev_id,
			       &adapter_conf, adapter->conf_arg);
	if (ret) {
		RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
				 ret);
		return ret;
	}

	adapter->max_nb = adapter_conf.max_nb;
	adapter->event_port_id = adapter_conf.event_port_id;
	adapter->service_inited = 1;

	return ret;
}

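/* Mark a queue pair (or all queue pairs when queue_pair_id is -1) as enabled
 * or disabled, keeping the per-device and per-adapter queue pair counts in
 * sync.
 */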
static void
eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
		   struct crypto_device_info *dev_info,
		   int32_t queue_pair_id,
		   uint8_t add)
{
	struct crypto_queue_pair_info *qp_info;
	int enabled;
	uint16_t i;

	if (dev_info->qpairs == NULL)
		return;

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, add);
	} else {
		qp_info = &dev_info->qpairs[queue_pair_id];
		enabled = qp_info->qp_enabled;
		if (add) {
			adapter->nb_qps += !enabled;
			dev_info->num_qpairs += !enabled;
		} else {
			adapter->nb_qps -= enabled;
			dev_info->num_qpairs -= enabled;
		}
		qp_info->qp_enabled = !!add;
	}
}

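/* Allocate the per-device queue pair state and an op buffer on first use and
 * enable the requested queue pair(s) for the software adapter path.
 */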
static int
eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
		   uint8_t cdev_id,
		   int queue_pair_id)
{
	struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
	struct crypto_queue_pair_info *qpairs;
	uint32_t i;

	if (dev_info->qpairs == NULL) {
		dev_info->qpairs =
		    rte_zmalloc_socket(adapter->mem_name,
				dev_info->dev->data->nb_queue_pairs *
				sizeof(struct crypto_queue_pair_info),
				0, adapter->socket_id);
		if (dev_info->qpairs == NULL)
			return -ENOMEM;

		qpairs = dev_info->qpairs;
		qpairs->op_buffer = rte_zmalloc_socket(adapter->mem_name,
					BATCH_SIZE *
					sizeof(struct rte_crypto_op *),
					0, adapter->socket_id);
		if (!qpairs->op_buffer) {
			rte_free(qpairs);
			return -ENOMEM;
		}
	}

	if (queue_pair_id == -1) {
		for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
			eca_update_qp_info(adapter, dev_info, i, 1);
	} else
		eca_update_qp_info(adapter, dev_info,
				   (uint16_t)queue_pair_id, 1);

	return 0;
}

int __rte_experimental
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
					uint8_t cdev_id,
					int32_t queue_pair_id,
					const struct rte_event *event)
{
	struct rte_event_crypto_adapter *adapter;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t cap;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret) {
		RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
				 " cdev %" PRIu8, id, cdev_id);
		return ret;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	    (event == NULL)) {
		RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
				 cdev_id);
		return -EINVAL;
	}

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	/* In case the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
	 * there is no need for a service core as the HW supports event forward
	 * capability.
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_add,
			-ENOTSUP);
		if (dev_info->qpairs == NULL) {
			dev_info->qpairs =
			    rte_zmalloc_socket(adapter->mem_name,
					dev_info->dev->data->nb_queue_pairs *
					sizeof(struct crypto_queue_pair_info),
					0, adapter->socket_id);
			if (dev_info->qpairs == NULL)
				return -ENOMEM;
		}

		ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
				dev_info->dev,
				queue_pair_id,
				event);
		if (ret)
			return ret;
		else
			eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
					   queue_pair_id, 1);
	}

	/* In case the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
	 * or a SW adapter is used, initiate the service so the application can
	 * choose whichever way it wants to use the adapter.
	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
	 * The application may want to use one of the two modes below:
	 * a. OP_FORWARD mode -> HW Dequeue + SW enqueue
	 * b. OP_NEW mode -> HW Dequeue
	 * Case 2: No HW caps, use SW adapter
	 * a. OP_FORWARD mode -> SW enqueue & dequeue
	 * b. OP_NEW mode -> SW Dequeue
	 */
	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
	    (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
	     !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
	     (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
		rte_spinlock_lock(&adapter->lock);
		ret = eca_init_service(adapter, id);
		if (ret == 0)
			ret = eca_add_queue_pair(adapter, cdev_id,
						 queue_pair_id);
		rte_spinlock_unlock(&adapter->lock);

		if (ret)
			return ret;

		rte_service_component_runstate_set(adapter->service_id, 1);
	}

	return 0;
}

int __rte_experimental
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
					int32_t queue_pair_id)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	int ret;
	uint32_t cap;
	uint16_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
		RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
		return -EINVAL;
	}

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
						cdev_id,
						&cap);
	if (ret)
		return ret;

	dev_info = &adapter->cdevs[cdev_id];

	if (queue_pair_id != -1 &&
	    (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
		RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
				 (uint16_t)queue_pair_id);
		return -EINVAL;
	}

	if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
	    (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
	     adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
		RTE_FUNC_PTR_OR_ERR_RET(
			*dev->dev_ops->crypto_adapter_queue_pair_del,
			-ENOTSUP);
		ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
				dev_info->dev,
				queue_pair_id);
		if (ret == 0) {
			eca_update_qp_info(adapter,
					&adapter->cdevs[cdev_id],
					queue_pair_id,
					0);
			if (dev_info->num_qpairs == 0) {
				rte_free(dev_info->qpairs);
				dev_info->qpairs = NULL;
			}
		}
	} else {
		if (adapter->nb_qps == 0)
			return 0;

		rte_spinlock_lock(&adapter->lock);
		if (queue_pair_id == -1) {
			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
			     i++)
				eca_update_qp_info(adapter, dev_info, i, 0);
		} else {
			eca_update_qp_info(adapter, dev_info,
					   (uint16_t)queue_pair_id, 0);
		}

		if (dev_info->num_qpairs == 0) {
			rte_free(dev_info->qpairs);
			dev_info->qpairs = NULL;
		}

		rte_spinlock_unlock(&adapter->lock);
		rte_service_component_runstate_set(adapter->service_id,
						   adapter->nb_qps);
	}

	return ret;
}

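/* Common start/stop handler: toggles per-device state, invokes the PMD
 * start/stop callbacks for devices with an internal event port, and updates
 * the service run state when the software path is in use.
 */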
static int
eca_adapter_ctrl(uint8_t id, int start)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;
	int use_service;
	int stop = !start;

	use_service = 0;
	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];

	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		/* if start, check for num queue pairs */
		if (start && !dev_info->num_qpairs)
			continue;
		/* if stop, check if dev has been started */
		if (stop && !dev_info->dev_started)
			continue;
		use_service |= !dev_info->internal_event_port;
		dev_info->dev_started = start;
		if (dev_info->internal_event_port == 0)
			continue;
		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
				dev_info->dev) :
			(*dev->dev_ops->crypto_adapter_stop)(dev,
				dev_info->dev);
	}

	if (use_service)
		rte_service_runstate_set(adapter->service_id, start);

	return 0;
}

int __rte_experimental
rte_event_crypto_adapter_start(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	return eca_adapter_ctrl(id, 1);
}

int __rte_experimental
rte_event_crypto_adapter_stop(uint8_t id)
{
	return eca_adapter_ctrl(id, 0);
}

int __rte_experimental
rte_event_crypto_adapter_stats_get(uint8_t id,
				   struct rte_event_crypto_adapter_stats *stats)
{
	struct rte_event_crypto_adapter *adapter;
	struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
	struct rte_event_crypto_adapter_stats dev_stats;
	struct rte_eventdev *dev;
	struct crypto_device_info *dev_info;
	uint32_t i;
	int ret;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || stats == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
		    dev->dev_ops->crypto_adapter_stats_get == NULL)
			continue;
		ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
				dev_info->dev,
				&dev_stats);
		if (ret)
			continue;

		dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
		dev_stats_sum.event_enq_count +=
			dev_stats.event_enq_count;
	}

	if (adapter->service_inited)
		*stats = adapter->crypto_stats;

	stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
	stats->event_enq_count += dev_stats_sum.event_enq_count;

	return 0;
}

int __rte_experimental
rte_event_crypto_adapter_stats_reset(uint8_t id)
{
	struct rte_event_crypto_adapter *adapter;
	struct crypto_device_info *dev_info;
	struct rte_eventdev *dev;
	uint32_t i;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL)
		return -EINVAL;

	dev = &rte_eventdevs[adapter->eventdev_id];
	for (i = 0; i < rte_cryptodev_count(); i++) {
		dev_info = &adapter->cdevs[i];
		if (dev_info->internal_event_port == 0 ||
		    dev->dev_ops->crypto_adapter_stats_reset == NULL)
			continue;
		(*dev->dev_ops->crypto_adapter_stats_reset)(dev,
				dev_info->dev);
	}

	memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
	return 0;
}

int __rte_experimental
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || service_id == NULL)
		return -EINVAL;

	if (adapter->service_inited)
		*service_id = adapter->service_id;

	return adapter->service_inited ? 0 : -ESRCH;
}

int __rte_experimental
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
	struct rte_event_crypto_adapter *adapter;

	EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

	adapter = eca_id_to_adapter(id);
	if (adapter == NULL || event_port_id == NULL)
		return -EINVAL;

	*event_port_id = adapter->event_port_id;

	return 0;
}