/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2019 NXP
 */

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>
#include <rte_cryptodev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include <dpaa2_sec_event.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Mapping between eventdev objects and DPAA2 hardware:
 * Eventdev   = SoC instance
 * Eventport  = DPIO instance
 * Eventqueue = DPCON instance
 * One eventdev can have N eventqueues.
 * The soft event flow is a DPCI instance.
 */

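/*
 * Illustrative topology (a sketch, not enforced by this file): one eventdev
 * per SoC, one DPCON-backed event queue per scheduling group, and one
 * DPIO-backed event port per lcore that polls the device.
 */
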
/* Dynamic logging identifier for the event device */
int dpaa2_logtype_event;

#define DPAA2_EV_TX_RETRY_COUNT 10000

static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_dpio_dev *dpio_dev;
	uint32_t queue_id = ev[0].queue_id;
	struct dpaa2_eventq *evq_info;
	uint32_t fqid, retry_count;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int i, n, ret;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	/* TODO: dpaa2_portal should carry dpio_dev; drop per-thread variable */
	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	evq_info = &dpaa2_portal->evq_info[queue_id];

	while (nb_events) {
		frames_to_send = (nb_events > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
					&& event->mbuf->seqn) {
				uint8_t dqrr_index = event->mbuf->seqn - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &=
					~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * TODO: align with hw context data to avoid copying
			 * the event into a freshly allocated object.
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
				sizeof(struct rte_event), 0);

			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				DPAA2_EVENTDEV_ERR(
					"Unable to allocate event object");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}

send_partial:
		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				/*
				 * Give up after DPAA2_EV_TX_RETRY_COUNT
				 * attempts; report what was actually sent.
				 */
				if (retry_count > DPAA2_EV_TX_RETRY_COUNT)
					return num_tx + loop;
			} else {
				loop += ret;
				retry_count = 0;
			}
		}
		num_tx += loop;
		nb_events -= loop;
	}

	return num_tx;
err:
	/* Roll back the channel mappings created above */
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;
		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

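/* Enqueue a single event: a thin wrapper over the burst path. */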
static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

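/*
 * Block until the portal raises a dequeue-ready interrupt (DQRI) or the
 * timeout expires, using the epoll fd exported by the per-lcore DPIO.
 */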
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);
	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
		   &epoll_ev, 1, timeout_ticks);
}

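/*
 * DPCI Rx callback for parallel queues: recover the rte_event stashed in
 * the frame descriptor and consume the DQRR entry immediately.
 */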
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

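/*
 * DPCI Rx callback for atomic queues: the DQRR entry is held and its index
 * is stashed in mbuf->seqn, so the context is released either by DCA on the
 * next enqueue or by the drain loop in the dequeue path.
 */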
static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

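/*
 * Dequeue burst: affine a portal if needed, link the port's channels on
 * first use, release any held atomic contexts, then pull events from the
 * DQRR ring, dispatching each entry to its per-queue callback.
 */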
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0, n;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}

	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	/* Check if there are atomic contexts to be released */
	i = 0;	/* reset: i may have been advanced by the linking loop */
	while (DPAA2_PER_LCORE_DQRR_SIZE) {
		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
			qbman_swp_dqrr_idx_consume(swp, i);
			DPAA2_PER_LCORE_DQRR_SIZE--;
			DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
				DPAA2_INVALID_MBUF_SEQN;
		}
		i++;
	}
	DPAA2_PER_LCORE_DQRR_HELD = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}
		qbman_swp_prefetch_dqrr_next(swp);

		fd = qbman_result_DQ_fd(dq);
		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_EVENTDEV_ERR("Null Return VQ received");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
err:
	/* Roll back the channel mappings created above */
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;

		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

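/* Dequeue a single event: a thin wrapper over the burst path. */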
static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

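/*
 * Report the device limits and capability flags. The event port count is
 * capped by the number of DPIO devices and, in turn, by the lcore count.
 */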
static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	/* We support at most one DPIO event port per lcore */
	if (dev_info->max_event_ports > rte_lcore_count())
		dev_info->max_event_ports = rte_lcore_count();
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	EVENTDEV_INIT_FUNC_TRACE();

	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * The timeout is supplied with each dequeue operation,
		 * so invalidate the global timeout value here.
		 */
		priv->dequeue_timeout_ns = 0;
	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
			     dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->nb_atomic_order_sequences =
		DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
	case RTE_SCHED_TYPE_ORDERED:
		break;
	default:
		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->disable_implicit_release = 0;
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	char event_port_name[32];
	struct dpaa2_port *portal;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	/* snprintf guards against overflowing the name buffer */
	snprintf(event_port_name, sizeof(event_port_name),
		 "event-port-%d", port_id);
	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
	if (!portal) {
		DPAA2_EVENTDEV_ERR("Memory allocation failure");
		return -ENOMEM;
	}

	memset(portal, 0, sizeof(struct dpaa2_port));
	dev->data->ports[port_id] = portal;

	return 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	struct dpaa2_port *portal = port;

	EVENTDEV_INIT_FUNC_TRACE();

	if (!portal)
		return;

	/* TODO: Cleanup is required when ports are in linked state. */
	if (portal->is_port_linked)
		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

	rte_free(portal);
}

static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	uint16_t i;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
		       sizeof(struct dpaa2_eventq));
		dpaa2_portal->evq_info[queues[i]].event_port = port;
		dpaa2_portal->num_linked_evq++;
	}

	return (int)nb_links;
}

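/*
 * Unlink queues from an event port: tear down each queue's static dequeue
 * channel and mark the port unlinked once no linked queues remain.
 */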
static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;
	uint16_t i;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &dpaa2_portal->evq_info[queues[i]];

		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
			/* TODO: dpaa2_portal should carry dpio_dev; drop
			 * the per-lcore variable.
			 */
			dpio_dev = DPAA2_PER_LCORE_DPIO;
			swp = DPAA2_PER_LCORE_PORTAL;

			qbman_swp_push_set(swp,
					   evq_info->dpcon->channel_index, 0);
			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
		}
		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
		if (dpaa2_portal->num_linked_evq)
			dpaa2_portal->num_linked_evq--;
	}

	if (!dpaa2_portal->num_linked_evq)
		dpaa2_portal->is_port_linked = false;

	return (int)nb_unlinks;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1000 * 1000;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	/* One tick equals one millisecond (10^6 ns) */
	*timeout_ticks = ns / scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon, queue_conf);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue attach failed: err(%d)", ret);
			goto fail;
		}
	}
	return 0;
fail:
	/* Detach the queues attached so far, in reverse order */
	for (i = (i - 1); i >= 0 ; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon, queue_conf);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue attach failed: err(%d)", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue detach failed: err(%d)", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue detach failed: err(%d)", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
			       const struct rte_cryptodev *cdev,
			       uint32_t *caps)
{
	const char *name = cdev->data->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strncmp(name, "dpsec-", 6))
		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
	else
		return -1;

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_attach failed: ret %d\n",
				ret);
			goto fail;
		}
	}
	return 0;
fail:
	/* Detach the queue pairs attached so far, in reverse order */
	for (i = (i - 1); i >= 0 ; i--)
		dpaa2_sec_eventq_detach(cryptodev, i);

	return ret;
}

static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		int32_t rx_queue_id,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_add_all(dev,
				cryptodev, ev);

	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
				      dpcon, ev);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
				    const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_detach failed: ret %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
				const struct rte_cryptodev *cryptodev,
				int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_tx_adapter_create(uint8_t id,
				 const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	/* Nothing to do. Simply return. */
	return 0;
}

static int
dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev,
			       uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
	return 0;
}

static uint16_t
dpaa2_eventdev_txa_enqueue_same_dest(void *port,
				     struct rte_event ev[],
				     uint16_t nb_events)
{
	struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
	uint8_t qid, i;

	RTE_SET_USED(port);

	/* All events share the destination of the first mbuf */
	m0 = (struct rte_mbuf *)ev[0].mbuf;
	qid = rte_event_eth_tx_adapter_txq_get(m0);

	for (i = 0; i < nb_events; i++)
		m[i] = (struct rte_mbuf *)ev[i].mbuf;

	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
}

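/*
 * Generic Tx adapter enqueue: the destination port/queue may differ per
 * event, so each mbuf is transmitted individually.
 */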
static uint16_t
dpaa2_eventdev_txa_enqueue(void *port,
			   struct rte_event ev[],
			   uint16_t nb_events)
{
	struct rte_mbuf *m;
	uint8_t qid, i;

	RTE_SET_USED(port);

	for (i = 0; i < nb_events; i++) {
		m = (struct rte_mbuf *)ev[i].mbuf;
		qid = rte_event_eth_tx_adapter_txq_get(m);
		rte_eth_tx_burst(m->port, qid, &m, 1);
	}

	return nb_events;
}

static struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get = dpaa2_eventdev_info_get,
	.dev_configure = dpaa2_eventdev_configure,
	.dev_start = dpaa2_eventdev_start,
	.dev_stop = dpaa2_eventdev_stop,
	.dev_close = dpaa2_eventdev_close,
	.queue_def_conf = dpaa2_eventdev_queue_def_conf,
	.queue_setup = dpaa2_eventdev_queue_setup,
	.queue_release = dpaa2_eventdev_queue_release,
	.port_def_conf = dpaa2_eventdev_port_def_conf,
	.port_setup = dpaa2_eventdev_port_setup,
	.port_release = dpaa2_eventdev_port_release,
	.port_link = dpaa2_eventdev_port_link,
	.port_unlink = dpaa2_eventdev_port_unlink,
	.timeout_ticks = dpaa2_eventdev_timeout_ticks,
	.dump = dpaa2_eventdev_dump,
	.dev_selftest = test_eventdev_dpaa2,
	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
	.eth_tx_adapter_caps_get = dpaa2_eventdev_tx_adapter_caps,
	.eth_tx_adapter_create = dpaa2_eventdev_tx_adapter_create,
	.crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
	.crypto_adapter_start = dpaa2_eventdev_crypto_start,
	.crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
};

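/*
 * Typical application-side usage through the generic eventdev API (a minimal
 * sketch, assuming an "event_dpaa2" vdev has been probed and one queue and
 * one port are configured; error handling omitted):
 *
 *	struct rte_event ev;
 *	uint8_t dev_id = 0, port_id = 0;
 *	uint64_t timeout_ticks = 0;
 *	uint16_t nb;
 *
 *	nb = rte_event_dequeue_burst(dev_id, port_id, &ev, 1, timeout_ticks);
 *	if (nb)
 *		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 */
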
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Configure the DPCI Rx queues to deliver frames to a DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
			       DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI Rx queue setup failed: err(%d)",
				ret);
			return ret;
		}
	}
	return 0;
}

static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->enqueue = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
	eventdev->txa_enqueue = dpaa2_eventdev_txa_enqueue;
	eventdev->txa_enqueue_same_dest = dpaa2_eventdev_txa_enqueue_same_dest;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	/* Pair up DPCON and DPCI devices until either pool runs out */
	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI setup failed: err(%d)", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_destroy(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	int i;

	eventdev = rte_event_pmd_get_named_dev(name);
	if (eventdev == NULL) {
		RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
		return -1;
	}

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	for (i = 0; i < priv->max_event_queues; i++) {
		if (priv->evq_info[i].dpcon)
			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);

		if (priv->evq_info[i].dpci)
			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);

		memset(&priv->evq_info[i], 0, sizeof(struct dpaa2_eventq));
	}

	priv->max_event_queues = 0;

	RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
	return 0;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Closing %s", name);

	dpaa2_eventdev_destroy(name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);

RTE_INIT(dpaa2_eventdev_init_log)
{
	dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
	if (dpaa2_logtype_event >= 0)
		rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
}