/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Cavium networks. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return rte_eventdev_globals->nb_devs;
}
int
rte_event_dev_get_dev_id(const char *name)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
		if ((strcmp(rte_event_devices[i].data->name, name)
				== 0) &&
				(rte_event_devices[i].attached ==
						RTE_EVENTDEV_ATTACHED))
			return i;

	return -ENODEV;
}
int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	if (dev->driver)
		dev_info->driver_name = dev->driver->pci_drv.driver.name;

	return 0;
}
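
/*
 * Internal helper: bring the per-device queue bookkeeping in line with the
 * requested nb_queues. Three cases are handled: first-time configuration
 * (allocate the queue priority array), re-configuration (release queues
 * beyond the new count and realloc the priority array), and teardown
 * (nb_queues == 0, release every previously configured queue).
 */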
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	uint8_t *queues_prio;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_prio == NULL && nb_queues != 0) {
		/* Allocate memory to store queue priority */
		dev->data->queues_prio = rte_zmalloc_socket(
				"eventdev->data->queues_prio",
				sizeof(dev->data->queues_prio[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_prio == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue priority,"
					"nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	} else if (dev->data->queues_prio != NULL && nb_queues != 0) {
		/* Re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re allocate memory to store queue priority */
		queues_prio = dev->data->queues_prio;
		queues_prio = rte_realloc(queues_prio,
				sizeof(queues_prio[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_prio == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue priority,"
						" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_prio = queues_prio;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_prio + old_nb_queues, 0,
				sizeof(queues_prio[0]) * new_qs);
		}
	} else if (dev->data->queues_prio != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}
#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
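
/*
 * Internal helper: mirror of rte_event_dev_queue_config() for ports. On
 * first-time configuration it allocates the port pointer array, the per-port
 * dequeue/enqueue depth arrays and the queue-to-port links_map; on
 * re-configuration it releases surplus ports and reallocs those arrays; with
 * nb_ports == 0 it releases every port. Fresh links_map entries are set to
 * EVENT_QUEUE_SERVICE_PRIORITY_INVALID, i.e. "not linked".
 */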
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint8_t *ports_dequeue_depth;
	uint8_t *ports_enqueue_depth;
	uint16_t *links_map;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store ports dequeue depth */
		dev->data->ports_dequeue_depth =
			rte_zmalloc_socket("eventdev->ports_dequeue_depth",
			sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_dequeue_depth == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port deq meta,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store ports enqueue depth */
		dev->data->ports_enqueue_depth =
			rte_zmalloc_socket("eventdev->ports_enqueue_depth",
			sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_enqueue_depth == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port enq meta,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}
		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
			dev->data->links_map[i] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_dequeue_depth = dev->data->ports_dequeue_depth;
		ports_enqueue_depth = dev->data->ports_enqueue_depth;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_dequeue_depth */
		ports_dequeue_depth = rte_realloc(ports_dequeue_depth,
			sizeof(ports_dequeue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_dequeue_depth == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port dequeue meta,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_enqueue_depth */
		ports_enqueue_depth = rte_realloc(ports_enqueue_depth,
			sizeof(ports_enqueue_depth[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_enqueue_depth == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port enqueue meta,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_dequeue_depth + old_nb_ports, 0,
				sizeof(ports_dequeue_depth[0]) * new_ps);
			memset(ports_enqueue_depth + old_nb_ports, 0,
				sizeof(ports_enqueue_depth[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}

		dev->data->ports = ports;
		dev->data->ports_dequeue_depth = ports_dequeue_depth;
		dev->data->ports_enqueue_depth = ports_enqueue_depth;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}
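
/*
 * Validate the application supplied rte_event_dev_config against the limits
 * advertised by the driver (dev_infos_get), size the queue and port arrays
 * via the two helpers above, and finally hand the configuration to the PMD.
 * Any failure after a successful queue/port setup rolls the arrays back to
 * zero so the device is left unconfigured rather than half-configured.
 */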
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				 info.max_dequeue_timeout_ns) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_port_dequeue_depth >
			 info.max_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_port_enqueue_depth >
			 info.max_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}
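
/*
 * A minimal application-side sketch of the expected call order (the device
 * id 0 and the sizes chosen below are illustrative, not part of this file):
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config conf;
 *
 *	rte_event_dev_info_get(0, &info);
 *	memset(&conf, 0, sizeof(conf));
 *	conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	conf.nb_events_limit = info.max_num_events;
 *	conf.nb_event_queues = 1;
 *	conf.nb_event_ports = 1;
 *	conf.nb_event_queue_flows = info.max_event_queue_flows;
 *	conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	if (rte_event_dev_configure(0, &conf) < 0)
 *		rte_panic("eventdev configure failed\n");
 */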
static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}
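
/*
 * The two helpers below look only at the queue-type bits of event_queue_cfg.
 * A queue configured as ALL_TYPES also counts as atomic (respectively
 * ordered), since such a queue must honour the corresponding flow and
 * ordering-sequence limits during queue setup.
 */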
static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf && (
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf && (
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		((queue_conf->event_queue_cfg &
			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
			== RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
		))
		return 1;
	else
		return 0;
}
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_prio[queue_id] = queue_conf->priority;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
uint8_t
rte_event_queue_count(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->nb_queues;
}
uint8_t
rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
		return dev->data->queues_prio[queue_id];
	else
		return RTE_EVENT_DEV_PRIORITY_NORMAL;
}
static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}
int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				 dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_dequeue_depth[port_id] =
			port_conf->dequeue_depth;
	dev->data->ports_enqueue_depth[port_id] =
			port_conf->enqueue_depth;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}
uint8_t
rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->ports_dequeue_depth[port_id];
}
uint8_t
rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->ports_enqueue_depth[port_id];
}
uint8_t
rte_event_port_count(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	dev = &rte_eventdevs[dev_id];
	return dev->data->nb_ports;
}
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	return diag;
}
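
/*
 * Typical application-side use (illustrative values): link every configured
 * queue to a port with normal priority by passing NULL arrays, or link a
 * specific subset explicitly:
 *
 *	uint8_t q[] = { 0, 1 };
 *	uint8_t prio[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *			   RTE_EVENT_DEV_PRIORITY_NORMAL };
 *
 *	rte_event_port_link(0, 0, NULL, NULL, 0);	// link all queues
 *	rte_event_port_link(0, 1, q, prio, 2);		// link queues 0 and 1
 */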
int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			all_queues[i] = i;
		queues = all_queues;
		nb_unlinks = dev->data->nb_queues;
	}

	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	return diag;
}
int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}
int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}
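
/*
 * Extended statistics (xstats) are entirely driver defined: the functions
 * below only validate the device id and forward to the PMD's xstats
 * callbacks when those are implemented.
 */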
static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}
int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}
/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
							ids, values, n);
	return -ENOTSUP;
}
uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return 0;
}
int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}
int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
				dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}
void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
				dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}
int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}
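
/*
 * The per-device rte_eventdev_data lives in a named memzone
 * ("rte_eventdev_data_<dev_id>") so that secondary processes can attach to
 * the same device state: the primary process reserves and zeroes the zone,
 * secondaries simply look it up.
 */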
static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}
static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}
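
/*
 * PMD helper: reserve a free slot in rte_event_devices[] for a new driver
 * instance, allocate (or attach to) its shared rte_eventdev_data, record the
 * device name and socket, and mark the slot attached.
 */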
struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
				"%s", name);

		eventdev->data->dev_id = dev_id;
		eventdev->data->socket_id = socket_id;
		eventdev->data->dev_started = 0;

		eventdev->attached = RTE_EVENTDEV_ATTACHED;

		eventdev_globals.nb_devs++;
	}

	return eventdev;
}
int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	ret = rte_event_dev_close(eventdev->data->dev_id);
	if (ret < 0)
		return ret;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}
struct rte_eventdev *
rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
		int socket_id)
{
	struct rte_eventdev *eventdev;

	/* Allocate device structure */
	eventdev = rte_event_pmd_allocate(name, socket_id);
	if (eventdev == NULL)
		return NULL;

	/* Allocate private device structure */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eventdev->data->dev_private =
				rte_zmalloc_socket("eventdev device private",
						dev_private_size,
						RTE_CACHE_LINE_SIZE,
						socket_id);

		if (eventdev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private device"
					" data");
	}

	return eventdev;
}
int
rte_event_pmd_vdev_uninit(const char *name)
{
	struct rte_eventdev *eventdev;

	if (name == NULL)
		return -EINVAL;

	eventdev = rte_event_pmd_get_named_dev(name);
	if (eventdev == NULL)
		return -ENODEV;

	/* Free the event device */
	rte_event_pmd_release(eventdev);

	return 0;
}
int
rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
			struct rte_pci_device *pci_dev)
{
	struct rte_eventdev_driver *eventdrv;
	struct rte_eventdev *eventdev;

	char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];

	int retval;

	eventdrv = (struct rte_eventdev_driver *)pci_drv;
	if (eventdrv == NULL)
		return -ENODEV;

	rte_pci_device_name(&pci_dev->addr, eventdev_name,
			sizeof(eventdev_name));

	eventdev = rte_event_pmd_allocate(eventdev_name,
			 pci_dev->device.numa_node);
	if (eventdev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eventdev->data->dev_private =
				rte_zmalloc_socket(
						"eventdev private structure",
						eventdrv->dev_private_size,
						RTE_CACHE_LINE_SIZE,
						rte_socket_id());

		if (eventdev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	eventdev->dev = &pci_dev->device;
	eventdev->driver = eventdrv;

	/* Invoke PMD device initialization function */
	retval = (*eventdrv->eventdev_init)(eventdev);
	if (retval == 0)
		return 0;

	RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
			" failed", pci_drv->driver.name,
			(unsigned int) pci_dev->id.vendor_id,
			(unsigned int) pci_dev->id.device_id);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eventdev->data->dev_private);

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	return -ENXIO;
}
int
rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev)
{
	const struct rte_eventdev_driver *eventdrv;
	struct rte_eventdev *eventdev;
	char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, eventdev_name,
			sizeof(eventdev_name));

	eventdev = rte_event_pmd_get_named_dev(eventdev_name);
	if (eventdev == NULL)
		return -ENODEV;

	eventdrv = (const struct rte_eventdev_driver *)pci_dev->driver;
	if (eventdrv == NULL)
		return -ENODEV;

	/* Invoke PMD device un-init function */
	if (*eventdrv->eventdev_uninit) {
		ret = (*eventdrv->eventdev_uninit)(eventdev);
		if (ret)
			return ret;
	}

	/* Free event device */
	rte_event_pmd_release(eventdev);

	eventdev->dev = NULL;
	eventdev->driver = NULL;

	return 0;
}