/* NOTE(review): removed stray web-blame header lines ("]>", "Commit | Line
 * | Data", commit hash) — extraction artifacts, not part of the source.
 */
1 | /* SPDX-License-Identifier: BSD-3-Clause |
2 | * Copyright(c) 2016 Cavium, Inc. | |
3 | * Copyright(c) 2016-2018 Intel Corporation. | |
4 | * Copyright 2016 NXP | |
5 | * All rights reserved. | |
6 | */ | |
7 | ||
8 | #ifndef _RTE_EVENTDEV_H_ | |
9 | #define _RTE_EVENTDEV_H_ | |
10 | ||
11 | /** | |
12 | * @file | |
13 | * | |
14 | * RTE Event Device API | |
15 | * | |
16 | * In a polling model, lcores poll ethdev ports and associated rx queues | |
17 | * directly to look for packet. In an event driven model, by contrast, lcores | |
18 | * call the scheduler that selects packets for them based on programmer | |
19 | * specified criteria. Eventdev library adds support for event driven | |
20 | * programming model, which offer applications automatic multicore scaling, | |
21 | * dynamic load balancing, pipelining, packet ingress order maintenance and | |
22 | * synchronization services to simplify application packet processing. | |
23 | * | |
24 | * The Event Device API is composed of two parts: | |
25 | * | |
26 | * - The application-oriented Event API that includes functions to setup | |
27 | * an event device (configure it, setup its queues, ports and start it), to | |
28 | * establish the link between queues to port and to receive events, and so on. | |
29 | * | |
30 | * - The driver-oriented Event API that exports a function allowing | |
31 | * an event poll Mode Driver (PMD) to simultaneously register itself as | |
32 | * an event device driver. | |
33 | * | |
34 | * Event device components: | |
35 | * | |
36 | * +-----------------+ | |
37 | * | +-------------+ | | |
38 | * +-------+ | | flow 0 | | | |
39 | * |Packet | | +-------------+ | | |
40 | * |event | | +-------------+ | | |
41 | * | | | | flow 1 | |port_link(port0, queue0) | |
42 | * +-------+ | +-------------+ | | +--------+ | |
43 | * +-------+ | +-------------+ o-----v-----o |dequeue +------+ | |
44 | * |Crypto | | | flow n | | | event +------->|Core 0| | |
45 | * |work | | +-------------+ o----+ | port 0 | | | | |
46 | * |done ev| | event queue 0 | | +--------+ +------+ | |
47 | * +-------+ +-----------------+ | | |
48 | * +-------+ | | |
49 | * |Timer | +-----------------+ | +--------+ | |
50 | * |expiry | | +-------------+ | +------o |dequeue +------+ | |
51 | * |event | | | flow 0 | o-----------o event +------->|Core 1| | |
52 | * +-------+ | +-------------+ | +----o port 1 | | | | |
53 | * Event enqueue | +-------------+ | | +--------+ +------+ | |
54 | * o-------------> | | flow 1 | | | | |
55 | * enqueue( | +-------------+ | | | |
56 | * queue_id, | | | +--------+ +------+ | |
57 | * flow_id, | +-------------+ | | | |dequeue |Core 2| | |
58 | * sched_type, | | flow n | o-----------o event +------->| | | |
59 | * event_type, | +-------------+ | | | port 2 | +------+ | |
60 | * subev_type, | event queue 1 | | +--------+ | |
61 | * event) +-----------------+ | +--------+ | |
62 | * | | |dequeue +------+ | |
63 | * +-------+ +-----------------+ | | event +------->|Core n| | |
64 | * |Core | | +-------------+ o-----------o port n | | | | |
65 | * |(SW) | | | flow 0 | | | +--------+ +--+---+ | |
66 | * |event | | +-------------+ | | | | |
67 | * +-------+ | +-------------+ | | | | |
68 | * ^ | | flow 1 | | | | | |
69 | * | | +-------------+ o------+ | | |
70 | * | | +-------------+ | | | |
71 | * | | | flow n | | | | |
72 | * | | +-------------+ | | | |
73 | * | | event queue n | | | |
74 | * | +-----------------+ | | |
75 | * | | | |
76 | * +-----------------------------------------------------------+ | |
77 | * | |
78 | * Event device: A hardware or software-based event scheduler. | |
79 | * | |
80 | * Event: A unit of scheduling that encapsulates a packet or other datatype | |
81 | * like SW generated event from the CPU, Crypto work completion notification, | |
82 | * Timer expiry event notification etc as well as metadata. | |
83 | * The metadata includes flow ID, scheduling type, event priority, event_type, | |
84 | * sub_event_type etc. | |
85 | * | |
86 | * Event queue: A queue containing events that are scheduled by the event dev. | |
87 | * An event queue contains events of different flows associated with scheduling | |
88 | * types, such as atomic, ordered, or parallel. | |
89 | * | |
90 | * Event port: An application's interface into the event dev for enqueue and | |
91 | * dequeue operations. Each event port can be linked with one or more | |
92 | * event queues for dequeue operations. | |
93 | * | |
94 | * By default, all the functions of the Event Device API exported by a PMD | |
95 | * are lock-free functions which assume to not be invoked in parallel on | |
96 | * different logical cores to work on the same target object. For instance, | |
97 | * the dequeue function of a PMD cannot be invoked in parallel on two logical | |
98 | * cores to operates on same event port. Of course, this function | |
99 | * can be invoked in parallel by different logical cores on different ports. | |
100 | * It is the responsibility of the upper level application to enforce this rule. | |
101 | * | |
102 | * In all functions of the Event API, the Event device is | |
103 | * designated by an integer >= 0 named the device identifier *dev_id* | |
104 | * | |
105 | * At the Event driver level, Event devices are represented by a generic | |
106 | * data structure of type *rte_event_dev*. | |
107 | * | |
108 | * Event devices are dynamically registered during the PCI/SoC device probing | |
109 | * phase performed at EAL initialization time. | |
110 | * When an Event device is being probed, a *rte_event_dev* structure and | |
111 | * a new device identifier are allocated for that device. Then, the | |
112 | * event_dev_init() function supplied by the Event driver matching the probed | |
113 | * device is invoked to properly initialize the device. | |
114 | * | |
115 | * The role of the device init function consists of resetting the hardware or | |
116 | * software event driver implementations. | |
117 | * | |
118 | * If the device init operation is successful, the correspondence between | |
119 | * the device identifier assigned to the new device and its associated | |
120 | * *rte_event_dev* structure is effectively registered. | |
121 | * Otherwise, both the *rte_event_dev* structure and the device identifier are | |
122 | * freed. | |
123 | * | |
124 | * The functions exported by the application Event API to setup a device | |
125 | * designated by its device identifier must be invoked in the following order: | |
126 | * - rte_event_dev_configure() | |
127 | * - rte_event_queue_setup() | |
128 | * - rte_event_port_setup() | |
129 | * - rte_event_port_link() | |
130 | * - rte_event_dev_start() | |
131 | * | |
132 | * Then, the application can invoke, in any order, the functions | |
133 | * exported by the Event API to schedule events, dequeue events, enqueue events, | |
134 | * change event queue(s) to event port [un]link establishment and so on. | |
135 | * | |
136 | * Application may use rte_event_[queue/port]_default_conf_get() to get the | |
137 | * default configuration to set up an event queue or event port by | |
138 | * overriding few default values. | |
139 | * | |
140 | * If the application wants to change the configuration (i.e. call | |
141 | * rte_event_dev_configure(), rte_event_queue_setup(), or | |
142 | * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the | |
143 | * device and then do the reconfiguration before calling rte_event_dev_start() | |
144 | * again. The schedule, enqueue and dequeue functions should not be invoked | |
145 | * when the device is stopped. | |
146 | * | |
147 | * Finally, an application can close an Event device by invoking the | |
148 | * rte_event_dev_close() function. | |
149 | * | |
150 | * Each function of the application Event API invokes a specific function | |
151 | * of the PMD that controls the target device designated by its device | |
152 | * identifier. | |
153 | * | |
154 | * For this purpose, all device-specific functions of an Event driver are | |
155 | * supplied through a set of pointers contained in a generic structure of type | |
156 | * *event_dev_ops*. | |
157 | * The address of the *event_dev_ops* structure is stored in the *rte_event_dev* | |
158 | * structure by the device init function of the Event driver, which is | |
159 | * invoked during the PCI/SoC device probing phase, as explained earlier. | |
160 | * | |
161 | * In other words, each function of the Event API simply retrieves the | |
162 | * *rte_event_dev* structure associated with the device identifier and | |
163 | * performs an indirect invocation of the corresponding driver function | |
164 | * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure. | |
165 | * | |
166 | * For performance reasons, the address of the fast-path functions of the | |
167 | * Event driver is not contained in the *event_dev_ops* structure. | |
168 | * Instead, they are directly stored at the beginning of the *rte_event_dev* | |
169 | * structure to avoid an extra indirect memory access during their invocation. | |
170 | * | |
171 | * RTE event device drivers do not use interrupts for enqueue or dequeue | |
172 | * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue | |
173 | * functions to applications. | |
174 | * | |
175 | * The events are injected to event device through *enqueue* operation by | |
176 | * event producers in the system. The typical event producers are ethdev | |
177 | * subsystem for generating packet events, CPU(SW) for generating events based | |
178 | * on different stages of application processing, cryptodev for generating | |
179 | * crypto work completion notification etc | |
180 | * | |
181 | * The *dequeue* operation gets one or more events from the event ports. | |
182 | * The application process the events and send to downstream event queue through | |
183 | * rte_event_enqueue_burst() if it is an intermediate stage of event processing, | |
184 | * on the final stage, the application may send to different subsystem like | |
185 | * ethdev to send the packet/event on the wire using ethdev | |
186 | * rte_eth_tx_burst() API. | |
187 | * | |
188 | * The point at which events are scheduled to ports depends on the device. | |
189 | * For hardware devices, scheduling occurs asynchronously without any software | |
190 | * intervention. Software schedulers can either be distributed | |
191 | * (each worker thread schedules events to its own port) or centralized | |
192 | * (a dedicated thread schedules to all ports). Distributed software schedulers | |
193 | * perform the scheduling in rte_event_dequeue_burst(), whereas centralized | |
194 | * scheduler logic need a dedicated service core for scheduling. | |
 * If the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set,
 * the device is centralized and thus needs a dedicated scheduling
 * thread that repeatedly calls a software-specific scheduling function.
198 | * | |
199 | * An event driven worker thread has following typical workflow on fastpath: | |
200 | * \code{.c} | |
201 | * while (1) { | |
202 | * rte_event_dequeue_burst(...); | |
203 | * (event processing) | |
204 | * rte_event_enqueue_burst(...); | |
205 | * } | |
206 | * \endcode | |
207 | * | |
208 | */ | |
209 | ||
210 | #ifdef __cplusplus | |
211 | extern "C" { | |
212 | #endif | |
213 | ||
214 | #include <rte_common.h> | |
215 | #include <rte_config.h> | |
216 | #include <rte_memory.h> | |
217 | #include <rte_errno.h> | |
218 | ||
219 | struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */ | |
220 | struct rte_event; | |
221 | ||
/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
/**< Event scheduling prioritization is based on the priority associated with
 * each event queue.
 *
 * @see rte_event_queue_setup()
 */
#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
/**< Event scheduling prioritization is based on the priority associated with
 * each event. Priority of each event is supplied in *rte_event* structure
 * on each enqueue operation.
 *
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
/**< Event device operates in distributed scheduling mode.
 * In distributed scheduling mode, event scheduling happens in HW or
 * rte_event_dequeue_burst() or the combination of these two.
 * If the flag is not set then eventdev is centralized and thus needs a
 * dedicated service core that acts as a scheduling thread.
 *
 * @see rte_event_dequeue_burst()
 */
#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
/**< Event device is capable of enqueuing events of any type to any queue.
 * If this capability is not set, the queue only supports events of the
 * *RTE_SCHED_TYPE_* type that it was created with.
 *
 * @see RTE_SCHED_TYPE_* values
 */
#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
/**< Event device is capable of operating in burst mode for enqueue(forward,
 * release) and dequeue operation. If this capability is not set, application
 * still uses the rte_event_dequeue_burst() and rte_event_enqueue_burst() but
 * PMD accepts only one event at a time.
 *
 * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
 */
#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
/**< Event device ports support disabling the implicit release feature, in
 * which the port will release all unreleased events in its dequeue operation.
 * If this capability is set and the port is configured with implicit release
 * disabled, the application is responsible for explicitly releasing events
 * using either the RTE_EVENT_OP_FORWARD or the RTE_EVENT_OP_RELEASE event
 * enqueue operations.
 *
 * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
 */

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
/**< Event device is capable of operating in non-sequential mode. The path
 * of the event is not required to be sequential; the application can change
 * the path of an event at runtime. If the flag is not set, then each event
 * will follow a path from queue 0 to queue 1 to queue 2 etc. If the flag is
 * set, events may be sent to queues in any order. If the flag is not set, the
 * eventdev will return an error when the application enqueues an event for a
 * qid which is not the next in the sequence.
 */

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
/**< Event device is capable of configuring the queue/port link at runtime.
 * If the flag is not set, the eventdev queue/port link can only be
 * configured during initialization.
 */

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
/**< Event device is capable of setting up a link between multiple queues
 * and a single port. If the flag is not set, the eventdev can only map a
 * single queue to each port, or map a single queue to many ports.
 */
292 | ||
/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
/**< Highest priority level recognized across the eventdev subsystem.
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_NORMAL 128
/**< Normal priority level recognized across the eventdev subsystem.
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
#define RTE_EVENT_DEV_PRIORITY_LOWEST 255
/**< Lowest priority level recognized across the eventdev subsystem.
 * @see rte_event_queue_setup(), rte_event_enqueue_burst()
 * @see rte_event_port_link()
 */
309 | ||
/**
 * Return the number of event devices that completed initialisation
 * successfully.
 *
 * @return
 *   Count of usable event devices.
 */
uint8_t
rte_event_dev_count(void);
319 | ||
/**
 * Look up the device identifier of a named event device.
 *
 * @param name
 *   Name of the event device to look up.
 *
 * @return
 *   The event device identifier on success.
 *   - <0: No event device with that name was found.
 */
int
rte_event_dev_get_dev_id(const char *name);
332 | ||
/**
 * Report the NUMA socket to which a device is attached.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   The NUMA socket id the device is connected to, or a default of zero
 *   when the socket cannot be determined.
 *   -(-EINVAL) dev_id value is out of range.
 */
int
rte_event_dev_socket_id(uint8_t dev_id);
345 | ||
/**
 * Event device information
 */
struct rte_event_dev_info {
	const char *driver_name;	/**< Name of the event driver */
	struct rte_device *dev;	/**< Underlying device information */
	uint32_t min_dequeue_timeout_ns;
	/**< Smallest global dequeue timeout (ns) this device supports */
	uint32_t max_dequeue_timeout_ns;
	/**< Largest global dequeue timeout (ns) this device supports */
	uint32_t dequeue_timeout_ns;
	/**< Global dequeue timeout (ns) currently configured on this device */
	uint8_t max_event_queues;
	/**< Maximum number of event queues this device supports */
	uint32_t max_event_queue_flows;
	/**< Maximum number of flows within an event queue on this device */
	uint8_t max_event_queue_priority_levels;
	/**< Number of event queue priority levels this device supports.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS
	 * capability.
	 */
	uint8_t max_event_priority_levels;
	/**< Number of event priority levels this device supports.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_EVENT_QOS
	 * capability.
	 */
	uint8_t max_event_ports;
	/**< Maximum number of event ports this device supports */
	uint8_t max_event_port_dequeue_depth;
	/**< Maximum number of events that can be dequeued at a time from an
	 * event port on this device.
	 * A device without bulk dequeue support reports this as 1.
	 */
	uint32_t max_event_port_enqueue_depth;
	/**< Maximum number of events that can be enqueued at a time to an
	 * event port on this device.
	 * A device without bulk enqueue support reports this as 1.
	 */
	int32_t max_num_events;
	/**< A *closed system* event dev has a limit on the number of events
	 * it can manage at a time; an *open system* event dev has no limit
	 * and reports this as -1.
	 */
	uint32_t event_dev_cap;
	/**< Event device capability flags (RTE_EVENT_DEV_CAP_*) */
};
390 | ||
/**
 * Retrieve contextual information about an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @param[out] dev_info
 *   Pointer to an *rte_event_dev_info* structure that is filled with the
 *   contextual information of the device.
 *
 * @return
 *   - 0: Success, the driver populated *dev_info*.
 *   - <0: Error code returned by the driver's info get function.
 *
 */
int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
408 | ||
/**
 * The count of ports.
 */
#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
/**
 * The count of queues.
 */
#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
/**
 * The status of the device: zero when stopped, non-zero when started.
 */
#define RTE_EVENT_DEV_ATTR_STARTED 2
421 | ||
/**
 * Read a single attribute from a device.
 *
 * @param dev_id Eventdev id
 * @param attr_id The attribute ID to retrieve
 * @param[out] attr_value Pointer filled in with the attribute value on
 * success.
 *
 * @return
 *   - 0: Attribute value retrieved successfully
 *   - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL
 */
int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value);
438 | ||
/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
/**< Override the global *dequeue_timeout_ns* with a per-dequeue timeout in
 * nanoseconds.
 * @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
 */
444 | ||
/** Event device configuration structure */
struct rte_event_dev_config {
	uint32_t dequeue_timeout_ns;
	/**< rte_event_dequeue_burst() timeout for this device.
	 * Must lie between the *min_dequeue_timeout_ns* and
	 * *max_dequeue_timeout_ns* values reported by
	 * rte_event_dev_info_get().
	 * A value of 0 is allowed, in which case a default dequeue timeout
	 * is used.
	 * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
	 */
	int32_t nb_events_limit;
	/**< In a *closed system*, the ceiling on the number of events that
	 * may be inflight in the eventdev at any one time. The limit keeps
	 * the finite space of a closed system from being overwhelmed. It
	 * may not exceed the *max_num_events* reported by
	 * rte_event_dev_info_get().
	 * Set this to -1 for an *open system*.
	 */
	uint8_t nb_event_queues;
	/**< Number of event queues to configure on this device.
	 * May not exceed the *max_event_queues* reported by
	 * rte_event_dev_info_get().
	 */
	uint8_t nb_event_ports;
	/**< Number of event ports to configure on this device.
	 * May not exceed the *max_event_ports* reported by
	 * rte_event_dev_info_get().
	 */
	uint32_t nb_event_queue_flows;
	/**< Number of flows for any event queue on this device.
	 * May not exceed the *max_event_queue_flows* reported by
	 * rte_event_dev_info_get().
	 */
	uint32_t nb_event_port_dequeue_depth;
	/**< Maximum number of events dequeued at a time from an event port
	 * on this device.
	 * May not exceed the *max_event_port_dequeue_depth* reported by
	 * rte_event_dev_info_get().
	 * Ignored when the device lacks the RTE_EVENT_DEV_CAP_BURST_MODE
	 * capability.
	 * @see rte_event_port_setup()
	 */
	uint32_t nb_event_port_enqueue_depth;
	/**< Maximum number of events enqueued at a time to an event port
	 * on this device.
	 * May not exceed the *max_event_port_enqueue_depth* reported by
	 * rte_event_dev_info_get().
	 * Ignored when the device lacks the RTE_EVENT_DEV_CAP_BURST_MODE
	 * capability.
	 * @see rte_event_port_setup()
	 */
	uint32_t event_dev_cfg;
	/**< Event device configuration flags (RTE_EVENT_DEV_CFG_*) */
};
497 | ||
/**
 * Configure an event device.
 *
 * This function must be invoked before any other function in the API.
 * It may also be re-invoked while the device is in the stopped state.
 *
 * rte_event_dev_info_get() may be used beforehand to discover the
 * capability of each resource available on this event device.
 *
 * @param dev_id
 *   The identifier of the device to configure.
 * @param dev_conf
 *   The event device configuration structure.
 *
 * @return
 *   - 0: Success, device configured.
 *   - <0: Error code returned by the driver configuration function.
 */
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf);
520 | ||
521 | ||
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
/**< Allow enqueue of the ATOMIC, ORDERED and PARALLEL schedule types.
 *
 * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
 * @see rte_event_enqueue_burst()
 */
#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
/**< This event queue links only to a single event port.
 *
 * @see rte_event_port_setup(), rte_event_port_link()
 */
536 | ||
/** Event queue configuration structure */
struct rte_event_queue_conf {
	uint32_t nb_atomic_flows;
	/**< The maximum number of active flows this queue can track at any
	 * given time. If the queue is configured for atomic scheduling (by
	 * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
	 * or the RTE_SCHED_TYPE_ATOMIC flag to schedule_type), the value
	 * must lie in the range [1, nb_event_queue_flows] previously
	 * supplied to rte_event_dev_configure().
	 */
	uint32_t nb_atomic_order_sequences;
	/**< The maximum number of outstanding events waiting to be reordered
	 * by this queue, i.e. the number of entries in this queue's reorder
	 * buffer. Once the reorder buffer holds *nb_atomic_order_sequences*
	 * events, the scheduler cannot schedule further events from this
	 * queue and dequeue returns an invalid event until one or more
	 * entries are freed up/released.
	 * If the queue is configured for ordered scheduling (by applying the
	 * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or the
	 * RTE_SCHED_TYPE_ORDERED flag to schedule_type), the value must lie
	 * in the range [1, nb_event_queue_flows] previously supplied to
	 * rte_event_dev_configure().
	 */
	uint32_t event_queue_cfg;
	/**< Queue configuration flags (EVENT_QUEUE_CFG_*) */
	uint8_t schedule_type;
	/**< Queue schedule type (RTE_SCHED_TYPE_*).
	 * Valid only when the RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set
	 * in event_queue_cfg.
	 */
	uint8_t priority;
	/**< Priority of this event queue relative to other event queues.
	 * The requested priority should be in the range
	 * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST];
	 * the implementation normalizes it to the priority range the
	 * device supports.
	 * Valid when the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS
	 * capability.
	 */
};
577 | ||
/**
 * Fetch the default configuration of the event queue designated by
 * *queue_id* from the event driver of an event device.
 *
 * Intended to be used together with rte_event_queue_setup(), when the
 * caller wants to set up a queue by overriding only a few defaults.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue whose configuration is requested.
 *   Must be in the range [0, nb_event_queues - 1] previously supplied to
 *   rte_event_dev_configure().
 * @param[out] queue_conf
 *   Pointer receiving the default event queue configuration data.
 * @return
 *   - 0: Success, the driver filled in the default queue configuration.
 *   - <0: Error code returned by the driver's info get function.
 *
 * @see rte_event_queue_setup()
 *
 */
int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf);
603 | ||
/**
 * Allocate and set up an event queue on an event device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the event queue to set up. Must be in the range
 *   [0, nb_event_queues - 1] previously supplied to
 *   rte_event_dev_configure().
 * @param queue_conf
 *   Pointer to the configuration data for the event queue.
 *   NULL is allowed, in which case the default configuration is used.
 *
 * @see rte_event_queue_default_conf_get()
 *
 * @return
 *   - 0: Success, event queue correctly set up.
 *   - <0: Event queue configuration failed.
 */
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf);
625 | ||
/**
 * The priority of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
/**
 * The number of atomic flows configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
/**
 * The number of atomic order sequences configured for the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
/**
 * The configuration flags of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
/**
 * The schedule type of the queue.
 */
#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
646 | ||
/**
 * Read a single attribute from a queue.
 *
 * @param dev_id
 *   Eventdev id
 * @param queue_id
 *   Eventdev queue id
 * @param attr_id
 *   The attribute ID to retrieve
 * @param[out] attr_value
 *   Pointer filled in with the attribute value on success
 *
 * @return
 *   - 0: Value returned successfully
 *   - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
 *     NULL
 *   - -EOVERFLOW: returned when attr_id is set to
 *     RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
 *     RTE_EVENT_QUEUE_CFG_ALL_TYPES
 */
int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			 uint32_t *attr_value);
670 | ||
/* Event port specific APIs */

/** Event port configuration structure */
struct rte_event_port_conf {
	int32_t new_event_threshold;
	/**< A backpressure threshold for new event enqueues on this port.
	 * Use for *closed system* event dev where event capacity is limited,
	 * and cannot exceed the capacity of the event dev.
	 * Configuring ports with different thresholds can make higher
	 * priority traffic less likely to be backpressured.
	 * For example, a port used to inject NIC Rx packets into the event
	 * dev can have a lower threshold so as not to overwhelm the device,
	 * while ports used for worker pools can have a higher threshold.
	 * This value cannot exceed the *nb_events_limit*
	 * which was previously supplied to rte_event_dev_configure().
	 * This should be set to '-1' for *open system*.
	 */
	uint16_t dequeue_depth;
	/**< Configure number of bulk dequeues for this event port.
	 * This value cannot exceed the *nb_event_port_dequeue_depth*
	 * which previously supplied to rte_event_dev_configure().
	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint16_t enqueue_depth;
	/**< Configure number of bulk enqueues for this event port.
	 * This value cannot exceed the *nb_event_port_enqueue_depth*
	 * which previously supplied to rte_event_dev_configure().
	 * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
	 */
	uint8_t disable_implicit_release;
	/**< Configure the port not to release outstanding events in
	 * rte_event_dequeue_burst(). If true, all events received through
	 * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
	 * RTE_EVENT_OP_FORWARD. Must be false when the device is not
	 * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
	 */
};
708 | ||
709 | /** | |
710 | * Retrieve the default configuration information of an event port designated | |
711 | * by its *port_id* from the event driver for an event device. | |
712 | * | |
713 | * This function intended to be used in conjunction with rte_event_port_setup() | |
714 | * where caller needs to set up the port by overriding few default values. | |
715 | * | |
716 | * @param dev_id | |
717 | * The identifier of the device. | |
718 | * @param port_id | |
719 | * The index of the event port to get the configuration information. | |
720 | * The value must be in the range [0, nb_event_ports - 1] | |
721 | * previously supplied to rte_event_dev_configure(). | |
722 | * @param[out] port_conf | |
723 | * The pointer to the default event port configuration data | |
724 | * @return | |
725 | * - 0: Success, driver updates the default event port configuration data. | |
726 | * - <0: Error code returned by the driver info get function. | |
727 | * | |
728 | * @see rte_event_port_setup() | |
729 | * | |
730 | */ | |
731 | int | |
732 | rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id, | |
733 | struct rte_event_port_conf *port_conf); | |
734 | ||
735 | /** | |
736 | * Allocate and set up an event port for an event device. | |
737 | * | |
738 | * @param dev_id | |
739 | * The identifier of the device. | |
740 | * @param port_id | |
741 | * The index of the event port to setup. The value must be in the range | |
742 | * [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure(). | |
743 | * @param port_conf | |
744 | * The pointer to the configuration data to be used for the queue. | |
745 | * NULL value is allowed, in which case default configuration used. | |
746 | * | |
747 | * @see rte_event_port_default_conf_get() | |
748 | * | |
749 | * @return | |
750 | * - 0: Success, event port correctly set up. | |
751 | * - <0: Port configuration failed | |
752 | * - (-EDQUOT) Quota exceeded(Application tried to link the queue configured | |
753 | * with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event ports) | |
754 | */ | |
755 | int | |
756 | rte_event_port_setup(uint8_t dev_id, uint8_t port_id, | |
757 | const struct rte_event_port_conf *port_conf); | |
758 | ||
759 | /** | |
760 | * The queue depth of the port on the enqueue side | |
761 | */ | |
762 | #define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0 | |
763 | /** | |
764 | * The queue depth of the port on the dequeue side | |
765 | */ | |
766 | #define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1 | |
767 | /** | |
768 | * The new event threshold of the port | |
769 | */ | |
770 | #define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2 | |
771 | ||
772 | /** | |
773 | * Get an attribute from a port. | |
774 | * | |
775 | * @param dev_id | |
776 | * Eventdev id | |
777 | * @param port_id | |
778 | * Eventdev port id | |
779 | * @param attr_id | |
780 | * The attribute ID to retrieve | |
781 | * @param[out] attr_value | |
782 | * A pointer that will be filled in with the attribute value if successful | |
783 | * | |
784 | * @return | |
785 | * - 0: Successfully returned value | |
786 | * - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL | |
787 | */ | |
788 | int | |
789 | rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id, | |
790 | uint32_t *attr_value); | |
791 | ||
792 | /** | |
793 | * Start an event device. | |
794 | * | |
795 | * The device start step is the last one and consists of setting the event | |
796 | * queues to start accepting the events and schedules to event ports. | |
797 | * | |
798 | * On success, all basic functions exported by the API (event enqueue, | |
799 | * event dequeue and so on) can be invoked. | |
800 | * | |
801 | * @param dev_id | |
802 | * Event device identifier | |
803 | * @return | |
804 | * - 0: Success, device started. | |
805 | * - -ESTALE : Not all ports of the device are configured | |
806 | * - -ENOLINK: Not all queues are linked, which could lead to deadlock. | |
807 | */ | |
808 | int | |
809 | rte_event_dev_start(uint8_t dev_id); | |
810 | ||
811 | /** | |
812 | * Stop an event device. | |
813 | * | |
814 | * This function causes all queued events to be drained, including those | |
815 | * residing in event ports. While draining events out of the device, this | |
816 | * function calls the user-provided flush callback (if one was registered) once | |
817 | * per event. | |
818 | * | |
819 | * The device can be restarted with a call to rte_event_dev_start(). Threads | |
820 | * that continue to enqueue/dequeue while the device is stopped, or being | |
821 | * stopped, will result in undefined behavior. This includes event adapters, | |
822 | * which must be stopped prior to stopping the eventdev. | |
823 | * | |
824 | * @param dev_id | |
825 | * Event device identifier. | |
826 | * | |
827 | * @see rte_event_dev_stop_flush_callback_register() | |
828 | */ | |
829 | void | |
830 | rte_event_dev_stop(uint8_t dev_id); | |
831 | ||
832 | typedef void (*eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event, | |
833 | void *arg); | |
834 | /**< Callback function called during rte_event_dev_stop(), invoked once per | |
835 | * flushed event. | |
836 | */ | |
837 | ||
838 | /** | |
839 | * Registers a callback function to be invoked during rte_event_dev_stop() for | |
840 | * each flushed event. This function can be used to properly dispose of queued | |
841 | * events, for example events containing memory pointers. | |
842 | * | |
843 | * The callback function is only registered for the calling process. The | |
844 | * callback function must be registered in every process that can call | |
845 | * rte_event_dev_stop(). | |
846 | * | |
847 | * To unregister a callback, call this function with a NULL callback pointer. | |
848 | * | |
849 | * @param dev_id | |
850 | * The identifier of the device. | |
851 | * @param callback | |
852 | * Callback function invoked once per flushed event. | |
853 | * @param userdata | |
854 | * Argument supplied to callback. | |
855 | * | |
856 | * @return | |
857 | * - 0 on success. | |
858 | * - -EINVAL if *dev_id* is invalid | |
859 | * | |
860 | * @see rte_event_dev_stop() | |
861 | */ | |
862 | int | |
863 | rte_event_dev_stop_flush_callback_register(uint8_t dev_id, | |
864 | eventdev_stop_flush_t callback, void *userdata); | |
865 | ||
866 | /** | |
867 | * Close an event device. The device cannot be restarted! | |
868 | * | |
869 | * @param dev_id | |
870 | * Event device identifier | |
871 | * | |
872 | * @return | |
873 | * - 0 on successfully closing device | |
874 | * - <0 on failure to close device | |
875 | * - (-EAGAIN) if device is busy | |
876 | */ | |
877 | int | |
878 | rte_event_dev_close(uint8_t dev_id); | |
879 | ||
880 | /* Scheduler type definitions */ | |
881 | #define RTE_SCHED_TYPE_ORDERED 0 | |
882 | /**< Ordered scheduling | |
883 | * | |
884 | * Events from an ordered flow of an event queue can be scheduled to multiple | |
885 | * ports for concurrent processing while maintaining the original event order. | |
886 | * This scheme enables the user to achieve high single flow throughput by | |
887 | * avoiding SW synchronization for ordering between ports which bound to cores. | |
888 | * | |
889 | * The source flow ordering from an event queue is maintained when events are | |
890 | * enqueued to their destination queue within the same ordered flow context. | |
891 | * An event port holds the context until application call | |
892 | * rte_event_dequeue_burst() from the same port, which implicitly releases | |
893 | * the context. | |
894 | * User may allow the scheduler to release the context earlier than that | |
895 | * by invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE operation. | |
896 | * | |
897 | * Events from the source queue appear in their original order when dequeued | |
898 | * from a destination queue. | |
899 | * Event ordering is based on the received event(s), but also other | |
900 | * (newly allocated or stored) events are ordered when enqueued within the same | |
901 | * ordered context. Events not enqueued (e.g. released or stored) within the | |
902 | * context are considered missing from reordering and are skipped at this time | |
903 | * (but can be ordered again within another context). | |
904 | * | |
905 | * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE | |
906 | */ | |
907 | ||
908 | #define RTE_SCHED_TYPE_ATOMIC 1 | |
909 | /**< Atomic scheduling | |
910 | * | |
911 | * Events from an atomic flow of an event queue can be scheduled only to a | |
912 | * single port at a time. The port is guaranteed to have exclusive (atomic) | |
913 | * access to the associated flow context, which enables the user to avoid SW | |
914 | * synchronization. Atomic flows also help to maintain event ordering | |
915 | * since only one port at a time can process events from a flow of an | |
916 | * event queue. | |
917 | * | |
918 | * The atomic queue synchronization context is dedicated to the port until | |
919 | * application call rte_event_dequeue_burst() from the same port, | |
920 | * which implicitly releases the context. User may allow the scheduler to | |
921 | * release the context earlier than that by invoking rte_event_enqueue_burst() | |
922 | * with RTE_EVENT_OP_RELEASE operation. | |
923 | * | |
924 | * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE | |
925 | */ | |
926 | ||
927 | #define RTE_SCHED_TYPE_PARALLEL 2 | |
928 | /**< Parallel scheduling | |
929 | * | |
930 | * The scheduler performs priority scheduling, load balancing, etc. functions | |
931 | * but does not provide additional event synchronization or ordering. | |
932 | * It is free to schedule events from a single parallel flow of an event queue | |
933 | * to multiple events ports for concurrent processing. | |
934 | * The application is responsible for flow context synchronization and | |
935 | * event ordering (SW synchronization). | |
936 | * | |
937 | * @see rte_event_queue_setup(), rte_event_dequeue_burst() | |
938 | */ | |
939 | ||
940 | /* Event types to classify the event source */ | |
941 | #define RTE_EVENT_TYPE_ETHDEV 0x0 | |
942 | /**< The event generated from ethdev subsystem */ | |
943 | #define RTE_EVENT_TYPE_CRYPTODEV 0x1 | |
944 | /**< The event generated from crypodev subsystem */ | |
945 | #define RTE_EVENT_TYPE_TIMER 0x2 | |
946 | /**< The event generated from event timer adapter */ | |
947 | #define RTE_EVENT_TYPE_CPU 0x3 | |
948 | /**< The event generated from cpu for pipelining. | |
949 | * Application may use *sub_event_type* to further classify the event | |
950 | */ | |
951 | #define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4 | |
952 | /**< The event generated from event eth Rx adapter */ | |
953 | #define RTE_EVENT_TYPE_MAX 0x10 | |
954 | /**< Maximum number of event types */ | |
955 | ||
956 | /* Event enqueue operations */ | |
957 | #define RTE_EVENT_OP_NEW 0 | |
958 | /**< The event producers use this operation to inject a new event to the | |
959 | * event device. | |
960 | */ | |
961 | #define RTE_EVENT_OP_FORWARD 1 | |
962 | /**< The CPU use this operation to forward the event to different event queue or | |
963 | * change to new application specific flow or schedule type to enable | |
964 | * pipelining. | |
965 | * | |
966 | * This operation must only be enqueued to the same port that the | |
967 | * event to be forwarded was dequeued from. | |
968 | */ | |
969 | #define RTE_EVENT_OP_RELEASE 2 | |
970 | /**< Release the flow context associated with the schedule type. | |
971 | * | |
972 | * If current flow's scheduler type method is *RTE_SCHED_TYPE_ATOMIC* | |
973 | * then this function hints the scheduler that the user has completed critical | |
974 | * section processing in the current atomic context. | |
975 | * The scheduler is now allowed to schedule events from the same flow from | |
976 | * an event queue to another port. However, the context may be still held | |
977 | * until the next rte_event_dequeue_burst() call, this call allows but does not | |
978 | * force the scheduler to release the context early. | |
979 | * | |
980 | * Early atomic context release may increase parallelism and thus system | |
981 | * performance, but the user needs to design carefully the split into critical | |
982 | * vs non-critical sections. | |
983 | * | |
984 | * If current flow's scheduler type method is *RTE_SCHED_TYPE_ORDERED* | |
985 | * then this function hints the scheduler that the user has done all that need | |
986 | * to maintain event order in the current ordered context. | |
987 | * The scheduler is allowed to release the ordered context of this port and | |
988 | * avoid reordering any following enqueues. | |
989 | * | |
990 | * Early ordered context release may increase parallelism and thus system | |
991 | * performance. | |
992 | * | |
993 | * If current flow's scheduler type method is *RTE_SCHED_TYPE_PARALLEL* | |
994 | * or no scheduling context is held then this function may be an NOOP, | |
995 | * depending on the implementation. | |
996 | * | |
997 | * This operation must only be enqueued to the same port that the | |
998 | * event to be released was dequeued from. | |
999 | * | |
1000 | */ | |
1001 | ||
1002 | /** | |
1003 | * The generic *rte_event* structure to hold the event attributes | |
1004 | * for dequeue and enqueue operation | |
1005 | */ | |
1006 | RTE_STD_C11 | |
1007 | struct rte_event { | |
1008 | /** WORD0 */ | |
1009 | union { | |
1010 | uint64_t event; | |
1011 | /** Event attributes for dequeue or enqueue operation */ | |
1012 | struct { | |
1013 | uint32_t flow_id:20; | |
1014 | /**< Targeted flow identifier for the enqueue and | |
1015 | * dequeue operation. | |
1016 | * The value must be in the range of | |
1017 | * [0, nb_event_queue_flows - 1] which | |
1018 | * previously supplied to rte_event_dev_configure(). | |
1019 | */ | |
1020 | uint32_t sub_event_type:8; | |
1021 | /**< Sub-event types based on the event source. | |
1022 | * @see RTE_EVENT_TYPE_CPU | |
1023 | */ | |
1024 | uint32_t event_type:4; | |
1025 | /**< Event type to classify the event source. | |
1026 | * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*) | |
1027 | */ | |
1028 | uint8_t op:2; | |
1029 | /**< The type of event enqueue operation - new/forward/ | |
1030 | * etc.This field is not preserved across an instance | |
1031 | * and is undefined on dequeue. | |
1032 | * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*) | |
1033 | */ | |
1034 | uint8_t rsvd:4; | |
1035 | /**< Reserved for future use */ | |
1036 | uint8_t sched_type:2; | |
1037 | /**< Scheduler synchronization type (RTE_SCHED_TYPE_*) | |
1038 | * associated with flow id on a given event queue | |
1039 | * for the enqueue and dequeue operation. | |
1040 | */ | |
1041 | uint8_t queue_id; | |
1042 | /**< Targeted event queue identifier for the enqueue or | |
1043 | * dequeue operation. | |
1044 | * The value must be in the range of | |
1045 | * [0, nb_event_queues - 1] which previously supplied to | |
1046 | * rte_event_dev_configure(). | |
1047 | */ | |
1048 | uint8_t priority; | |
1049 | /**< Event priority relative to other events in the | |
1050 | * event queue. The requested priority should in the | |
1051 | * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST, | |
1052 | * RTE_EVENT_DEV_PRIORITY_LOWEST]. | |
1053 | * The implementation shall normalize the requested | |
1054 | * priority to supported priority value. | |
1055 | * Valid when the device has | |
1056 | * RTE_EVENT_DEV_CAP_EVENT_QOS capability. | |
1057 | */ | |
1058 | uint8_t impl_opaque; | |
1059 | /**< Implementation specific opaque value. | |
1060 | * An implementation may use this field to hold | |
1061 | * implementation specific value to share between | |
1062 | * dequeue and enqueue operation. | |
1063 | * The application should not modify this field. | |
1064 | */ | |
1065 | }; | |
1066 | }; | |
1067 | /** WORD1 */ | |
1068 | union { | |
1069 | uint64_t u64; | |
1070 | /**< Opaque 64-bit value */ | |
1071 | void *event_ptr; | |
1072 | /**< Opaque event pointer */ | |
1073 | struct rte_mbuf *mbuf; | |
1074 | /**< mbuf pointer if dequeued event is associated with mbuf */ | |
1075 | }; | |
1076 | }; | |
1077 | ||
1078 | /* Ethdev Rx adapter capability bitmap flags */ | |
1079 | #define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1 | |
1080 | /**< This flag is sent when the packet transfer mechanism is in HW. | |
1081 | * Ethdev can send packets to the event device using internal event port. | |
1082 | */ | |
1083 | #define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2 | |
1084 | /**< Adapter supports multiple event queues per ethdev. Every ethdev | |
1085 | * Rx queue can be connected to a unique event queue. | |
1086 | */ | |
1087 | #define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4 | |
1088 | /**< The application can override the adapter generated flow ID in the | |
1089 | * event. This flow ID can be specified when adding an ethdev Rx queue | |
1090 | * to the adapter using the ev member of struct rte_event_eth_rx_adapter | |
1091 | * @see struct rte_event_eth_rx_adapter_queue_conf::ev | |
1092 | * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags | |
1093 | */ | |
1094 | ||
1095 | /** | |
1096 | * Retrieve the event device's ethdev Rx adapter capabilities for the | |
1097 | * specified ethernet port | |
1098 | * | |
1099 | * @param dev_id | |
1100 | * The identifier of the device. | |
1101 | * | |
1102 | * @param eth_port_id | |
1103 | * The identifier of the ethernet device. | |
1104 | * | |
1105 | * @param[out] caps | |
1106 | * A pointer to memory filled with Rx event adapter capabilities. | |
1107 | * | |
1108 | * @return | |
1109 | * - 0: Success, driver provides Rx event adapter capabilities for the | |
1110 | * ethernet device. | |
1111 | * - <0: Error code returned by the driver function. | |
1112 | * | |
1113 | */ | |
1114 | int | |
9f95a23c | 1115 | rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, |
11fdf7f2 TL |
1116 | uint32_t *caps); |
1117 | ||
1118 | #define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0) | |
1119 | /**< This flag is set when the timer mechanism is in HW. */ | |
1120 | ||
1121 | /** | |
1122 | * Retrieve the event device's timer adapter capabilities. | |
1123 | * | |
1124 | * @param dev_id | |
1125 | * The identifier of the device. | |
1126 | * | |
1127 | * @param[out] caps | |
1128 | * A pointer to memory to be filled with event timer adapter capabilities. | |
1129 | * | |
1130 | * @return | |
1131 | * - 0: Success, driver provided event timer adapter capabilities. | |
1132 | * - <0: Error code returned by the driver function. | |
1133 | */ | |
9f95a23c | 1134 | int |
11fdf7f2 TL |
1135 | rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps); |
1136 | ||
1137 | /* Crypto adapter capability bitmap flag */ | |
1138 | #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1 | |
1139 | /**< Flag indicates HW is capable of generating events in | |
1140 | * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send | |
1141 | * packets to the event device as new events using an internal | |
1142 | * event port. | |
1143 | */ | |
1144 | ||
1145 | #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2 | |
1146 | /**< Flag indicates HW is capable of generating events in | |
1147 | * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send | |
1148 | * packets to the event device as forwarded event using an | |
1149 | * internal event port. | |
1150 | */ | |
1151 | ||
1152 | #define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4 | |
1153 | /**< Flag indicates HW is capable of mapping crypto queue pair to | |
1154 | * event queue. | |
1155 | */ | |
1156 | ||
1157 | #define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8 | |
9f95a23c | 1158 | /**< Flag indicates HW/SW supports a mechanism to store and retrieve |
11fdf7f2 TL |
1159 | * the private data information along with the crypto session. |
1160 | */ | |
1161 | ||
1162 | /** | |
11fdf7f2 TL |
1163 | * Retrieve the event device's crypto adapter capabilities for the |
1164 | * specified cryptodev device | |
1165 | * | |
1166 | * @param dev_id | |
1167 | * The identifier of the device. | |
1168 | * | |
1169 | * @param cdev_id | |
1170 | * The identifier of the cryptodev device. | |
1171 | * | |
1172 | * @param[out] caps | |
1173 | * A pointer to memory filled with event adapter capabilities. | |
1174 | * It is expected to be pre-allocated & initialized by caller. | |
1175 | * | |
1176 | * @return | |
1177 | * - 0: Success, driver provides event adapter capabilities for the | |
1178 | * cryptodev device. | |
1179 | * - <0: Error code returned by the driver function. | |
1180 | * | |
1181 | */ | |
9f95a23c | 1182 | int |
11fdf7f2 TL |
1183 | rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id, |
1184 | uint32_t *caps); | |
1185 | ||
9f95a23c TL |
1186 | /* Ethdev Tx adapter capability bitmap flags */ |
1187 | #define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1 | |
1188 | /**< This flag is sent when the PMD supports a packet transmit callback | |
1189 | */ | |
1190 | ||
1191 | /** | |
1192 | * Retrieve the event device's eth Tx adapter capabilities | |
1193 | * | |
1194 | * @param dev_id | |
1195 | * The identifier of the device. | |
1196 | * | |
1197 | * @param eth_port_id | |
1198 | * The identifier of the ethernet device. | |
1199 | * | |
1200 | * @param[out] caps | |
1201 | * A pointer to memory filled with eth Tx adapter capabilities. | |
1202 | * | |
1203 | * @return | |
1204 | * - 0: Success, driver provides eth Tx adapter capabilities. | |
1205 | * - <0: Error code returned by the driver function. | |
1206 | * | |
1207 | */ | |
1208 | int | |
1209 | rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, | |
1210 | uint32_t *caps); | |
1211 | ||
11fdf7f2 TL |
1212 | struct rte_eventdev_ops; |
1213 | struct rte_eventdev; | |
1214 | ||
1215 | typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev); | |
1216 | /**< @internal Enqueue event on port of a device */ | |
1217 | ||
1218 | typedef uint16_t (*event_enqueue_burst_t)(void *port, | |
1219 | const struct rte_event ev[], uint16_t nb_events); | |
1220 | /**< @internal Enqueue burst of events on port of a device */ | |
1221 | ||
1222 | typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev, | |
1223 | uint64_t timeout_ticks); | |
1224 | /**< @internal Dequeue event from port of a device */ | |
1225 | ||
1226 | typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[], | |
1227 | uint16_t nb_events, uint64_t timeout_ticks); | |
1228 | /**< @internal Dequeue burst of events from port of a device */ | |
1229 | ||
9f95a23c TL |
1230 | typedef uint16_t (*event_tx_adapter_enqueue)(void *port, |
1231 | struct rte_event ev[], uint16_t nb_events); | |
1232 | /**< @internal Enqueue burst of events on port of a device */ | |
1233 | ||
11fdf7f2 TL |
1234 | #define RTE_EVENTDEV_NAME_MAX_LEN (64) |
1235 | /**< @internal Max length of name of event PMD */ | |
1236 | ||
1237 | /** | |
1238 | * @internal | |
1239 | * The data part, with no function pointers, associated with each device. | |
1240 | * | |
1241 | * This structure is safe to place in shared memory to be common among | |
1242 | * different processes in a multi-process configuration. | |
1243 | */ | |
1244 | struct rte_eventdev_data { | |
1245 | int socket_id; | |
1246 | /**< Socket ID where memory is allocated */ | |
1247 | uint8_t dev_id; | |
1248 | /**< Device ID for this instance */ | |
1249 | uint8_t nb_queues; | |
1250 | /**< Number of event queues. */ | |
1251 | uint8_t nb_ports; | |
1252 | /**< Number of event ports. */ | |
1253 | void **ports; | |
1254 | /**< Array of pointers to ports. */ | |
1255 | struct rte_event_port_conf *ports_cfg; | |
1256 | /**< Array of port configuration structures. */ | |
1257 | struct rte_event_queue_conf *queues_cfg; | |
1258 | /**< Array of queue configuration structures. */ | |
1259 | uint16_t *links_map; | |
1260 | /**< Memory to store queues to port connections. */ | |
1261 | void *dev_private; | |
1262 | /**< PMD-specific private data */ | |
1263 | uint32_t event_dev_cap; | |
1264 | /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/ | |
1265 | struct rte_event_dev_config dev_conf; | |
1266 | /**< Configuration applied to device. */ | |
1267 | uint8_t service_inited; | |
1268 | /* Service initialization state */ | |
1269 | uint32_t service_id; | |
1270 | /* Service ID*/ | |
1271 | void *dev_stop_flush_arg; | |
1272 | /**< User-provided argument for event flush function */ | |
1273 | ||
1274 | RTE_STD_C11 | |
1275 | uint8_t dev_started : 1; | |
1276 | /**< Device state: STARTED(1)/STOPPED(0) */ | |
1277 | ||
1278 | char name[RTE_EVENTDEV_NAME_MAX_LEN]; | |
1279 | /**< Unique identifier name */ | |
1280 | } __rte_cache_aligned; | |
1281 | ||
1282 | /** @internal The data structure associated with each event device. */ | |
1283 | struct rte_eventdev { | |
1284 | event_enqueue_t enqueue; | |
1285 | /**< Pointer to PMD enqueue function. */ | |
1286 | event_enqueue_burst_t enqueue_burst; | |
1287 | /**< Pointer to PMD enqueue burst function. */ | |
1288 | event_enqueue_burst_t enqueue_new_burst; | |
1289 | /**< Pointer to PMD enqueue burst function(op new variant) */ | |
1290 | event_enqueue_burst_t enqueue_forward_burst; | |
1291 | /**< Pointer to PMD enqueue burst function(op forward variant) */ | |
1292 | event_dequeue_t dequeue; | |
1293 | /**< Pointer to PMD dequeue function. */ | |
1294 | event_dequeue_burst_t dequeue_burst; | |
1295 | /**< Pointer to PMD dequeue burst function. */ | |
9f95a23c TL |
1296 | event_tx_adapter_enqueue txa_enqueue; |
1297 | /**< Pointer to PMD eth Tx adapter enqueue function. */ | |
11fdf7f2 TL |
1298 | struct rte_eventdev_data *data; |
1299 | /**< Pointer to device data */ | |
1300 | struct rte_eventdev_ops *dev_ops; | |
1301 | /**< Functions exported by PMD */ | |
1302 | struct rte_device *dev; | |
1303 | /**< Device info. supplied by probing */ | |
1304 | ||
1305 | RTE_STD_C11 | |
1306 | uint8_t attached : 1; | |
1307 | /**< Flag indicating the device is attached */ | |
1308 | } __rte_cache_aligned; | |
1309 | ||
1310 | extern struct rte_eventdev *rte_eventdevs; | |
1311 | /** @internal The pool of rte_eventdev structures. */ | |
1312 | ||
1313 | static __rte_always_inline uint16_t | |
1314 | __rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, | |
1315 | const struct rte_event ev[], uint16_t nb_events, | |
1316 | const event_enqueue_burst_t fn) | |
1317 | { | |
1318 | const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; | |
1319 | ||
1320 | #ifdef RTE_LIBRTE_EVENTDEV_DEBUG | |
1321 | if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) { | |
1322 | rte_errno = -EINVAL; | |
1323 | return 0; | |
1324 | } | |
1325 | ||
1326 | if (port_id >= dev->data->nb_ports) { | |
1327 | rte_errno = -EINVAL; | |
1328 | return 0; | |
1329 | } | |
1330 | #endif | |
1331 | /* | |
1332 | * Allow zero cost non burst mode routine invocation if application | |
1333 | * requests nb_events as const one | |
1334 | */ | |
1335 | if (nb_events == 1) | |
1336 | return (*dev->enqueue)(dev->data->ports[port_id], ev); | |
1337 | else | |
1338 | return fn(dev->data->ports[port_id], ev, nb_events); | |
1339 | } | |
1340 | ||
1341 | /** | |
1342 | * Enqueue a burst of events objects or an event object supplied in *rte_event* | |
1343 | * structure on an event device designated by its *dev_id* through the event | |
1344 | * port specified by *port_id*. Each event object specifies the event queue on | |
1345 | * which it will be enqueued. | |
1346 | * | |
1347 | * The *nb_events* parameter is the number of event objects to enqueue which are | |
1348 | * supplied in the *ev* array of *rte_event* structure. | |
1349 | * | |
1350 | * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be | |
1351 | * enqueued to the same port that their associated events were dequeued from. | |
1352 | * | |
1353 | * The rte_event_enqueue_burst() function returns the number of | |
1354 | * events objects it actually enqueued. A return value equal to *nb_events* | |
1355 | * means that all event objects have been enqueued. | |
1356 | * | |
1357 | * @param dev_id | |
1358 | * The identifier of the device. | |
1359 | * @param port_id | |
1360 | * The identifier of the event port. | |
1361 | * @param ev | |
1362 | * Points to an array of *nb_events* objects of type *rte_event* structure | |
1363 | * which contain the event object enqueue operations to be processed. | |
1364 | * @param nb_events | |
1365 | * The number of event objects to enqueue, typically number of | |
9f95a23c TL |
1366 | * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...) |
1367 | * available for this port. | |
11fdf7f2 TL |
1368 | * |
1369 | * @return | |
1370 | * The number of event objects actually enqueued on the event device. The | |
1371 | * return value can be less than the value of the *nb_events* parameter when | |
1372 | * the event devices queue is full or if invalid parameters are specified in a | |
1373 | * *rte_event*. If the return value is less than *nb_events*, the remaining | |
1374 | * events at the end of ev[] are not consumed and the caller has to take care | |
1375 | * of them, and rte_errno is set accordingly. Possible errno values include: | |
1376 | * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue | |
1377 | * ID is invalid, or an event's sched type doesn't match the | |
1378 | * capabilities of the destination queue. | |
1379 | * - -ENOSPC The event port was backpressured and unable to enqueue | |
1380 | * one or more events. This error code is only applicable to | |
1381 | * closed systems. | |
9f95a23c | 1382 | * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH |
11fdf7f2 TL |
1383 | */ |
1384 | static inline uint16_t | |
1385 | rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, | |
1386 | const struct rte_event ev[], uint16_t nb_events) | |
1387 | { | |
1388 | const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; | |
1389 | ||
1390 | return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events, | |
1391 | dev->enqueue_burst); | |
1392 | } | |
1393 | ||
1394 | /** | |
1395 | * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_NEW* on | |
1396 | * an event device designated by its *dev_id* through the event port specified | |
1397 | * by *port_id*. | |
1398 | * | |
1399 | * Provides the same functionality as rte_event_enqueue_burst(), except that | |
1400 | * the application can use this API when all objects in the burst contain | |
1401 | * the enqueue operation of the type *RTE_EVENT_OP_NEW*. This specialized | |
1402 | * function can provide the additional hint to the PMD and optimize if possible. | |
1403 | * | |
1404 | * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst | |
1405 | * has event object of operation type != RTE_EVENT_OP_NEW. | |
1406 | * | |
1407 | * @param dev_id | |
1408 | * The identifier of the device. | |
1409 | * @param port_id | |
1410 | * The identifier of the event port. | |
1411 | * @param ev | |
1412 | * Points to an array of *nb_events* objects of type *rte_event* structure | |
1413 | * which contain the event object enqueue operations to be processed. | |
1414 | * @param nb_events | |
1415 | * The number of event objects to enqueue, typically number of | |
9f95a23c TL |
1416 | * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...) |
1417 | * available for this port. | |
11fdf7f2 TL |
1418 | * |
1419 | * @return | |
1420 | * The number of event objects actually enqueued on the event device. The | |
1421 | * return value can be less than the value of the *nb_events* parameter when | |
1422 | * the event devices queue is full or if invalid parameters are specified in a | |
1423 | * *rte_event*. If the return value is less than *nb_events*, the remaining | |
1424 | * events at the end of ev[] are not consumed and the caller has to take care | |
1425 | * of them, and rte_errno is set accordingly. Possible errno values include: | |
1426 | * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue | |
1427 | * ID is invalid, or an event's sched type doesn't match the | |
1428 | * capabilities of the destination queue. | |
1429 | * - -ENOSPC The event port was backpressured and unable to enqueue | |
1430 | * one or more events. This error code is only applicable to | |
1431 | * closed systems. | |
9f95a23c TL |
1432 | * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH |
1433 | * @see rte_event_enqueue_burst() | |
11fdf7f2 TL |
1434 | */ |
1435 | static inline uint16_t | |
1436 | rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id, | |
1437 | const struct rte_event ev[], uint16_t nb_events) | |
1438 | { | |
1439 | const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; | |
1440 | ||
1441 | return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events, | |
1442 | dev->enqueue_new_burst); | |
1443 | } | |
1444 | ||
1445 | /** | |
1446 | * Enqueue a burst of events objects of operation type *RTE_EVENT_OP_FORWARD* | |
1447 | * on an event device designated by its *dev_id* through the event port | |
1448 | * specified by *port_id*. | |
1449 | * | |
1450 | * Provides the same functionality as rte_event_enqueue_burst(), except that | |
1451 | * the application can use this API when all objects in the burst contain | |
1452 | * the enqueue operation of the type *RTE_EVENT_OP_FORWARD*. This specialized | |
1453 | * function can provide the additional hint to the PMD and optimize if possible. | |
1454 | * | |
1455 | * The rte_event_enqueue_forward_burst() result is undefined if the enqueue burst | |
1456 | * has event object of operation type != RTE_EVENT_OP_FORWARD. | |
1457 | * | |
1458 | * @param dev_id | |
1459 | * The identifier of the device. | |
1460 | * @param port_id | |
1461 | * The identifier of the event port. | |
1462 | * @param ev | |
1463 | * Points to an array of *nb_events* objects of type *rte_event* structure | |
1464 | * which contain the event object enqueue operations to be processed. | |
1465 | * @param nb_events | |
1466 | * The number of event objects to enqueue, typically number of | |
9f95a23c TL |
1467 | * rte_event_port_attr_get(...RTE_EVENT_PORT_ATTR_ENQ_DEPTH...) |
1468 | * available for this port. | |
11fdf7f2 TL |
1469 | * |
1470 | * @return | |
1471 | * The number of event objects actually enqueued on the event device. The | |
1472 | * return value can be less than the value of the *nb_events* parameter when | |
1473 | * the event devices queue is full or if invalid parameters are specified in a | |
1474 | * *rte_event*. If the return value is less than *nb_events*, the remaining | |
1475 | * events at the end of ev[] are not consumed and the caller has to take care | |
1476 | * of them, and rte_errno is set accordingly. Possible errno values include: | |
1477 | * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue | |
1478 | * ID is invalid, or an event's sched type doesn't match the | |
1479 | * capabilities of the destination queue. | |
1480 | * - -ENOSPC The event port was backpressured and unable to enqueue | |
1481 | * one or more events. This error code is only applicable to | |
1482 | * closed systems. | |
9f95a23c TL |
1483 | * @see rte_event_port_attr_get(), RTE_EVENT_PORT_ATTR_ENQ_DEPTH |
1484 | * @see rte_event_enqueue_burst() | |
11fdf7f2 TL |
1485 | */ |
1486 | static inline uint16_t | |
1487 | rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id, | |
1488 | const struct rte_event ev[], uint16_t nb_events) | |
1489 | { | |
1490 | const struct rte_eventdev *dev = &rte_eventdevs[dev_id]; | |
1491 | ||
1492 | return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events, | |
1493 | dev->enqueue_forward_burst); | |
1494 | } | |
1495 | ||
1496 | /** | |
1497 | * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst() | |
1498 | * | |
1499 | * If the device is configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag | |
1500 | * then application can use this function to convert timeout value in | |
1501 | * nanoseconds to implementations specific timeout value supplied in | |
1502 | * rte_event_dequeue_burst() | |
1503 | * | |
1504 | * @param dev_id | |
1505 | * The identifier of the device. | |
1506 | * @param ns | |
1507 | * Wait time in nanosecond | |
1508 | * @param[out] timeout_ticks | |
1509 | * Value for the *timeout_ticks* parameter in rte_event_dequeue_burst() | |
1510 | * | |
1511 | * @return | |
1512 | * - 0 on success. | |
1513 | * - -ENOTSUP if the device doesn't support timeouts | |
1514 | * - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL | |
1515 | * - other values < 0 on failure. | |
1516 | * | |
1517 | * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT | |
1518 | * @see rte_event_dev_configure() | |
1519 | * | |
1520 | */ | |
1521 | int | |
1522 | rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns, | |
1523 | uint64_t *timeout_ticks); | |
1524 | ||
1525 | /** | |
1526 | * Dequeue a burst of events objects or an event object from the event port | |
1527 | * designated by its *event_port_id*, on an event device designated | |
1528 | * by its *dev_id*. | |
1529 | * | |
1530 | * rte_event_dequeue_burst() does not dictate the specifics of scheduling | |
1531 | * algorithm as each eventdev driver may have different criteria to schedule | |
1532 | * an event. However, in general, from an application perspective scheduler may | |
1533 | * use the following scheme to dispatch an event to the port. | |
1534 | * | |
1535 | * 1) Selection of event queue based on | |
1536 | * a) The list of event queues are linked to the event port. | |
1537 | * b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event | |
1538 | * queue selection from list is based on event queue priority relative to | |
1539 | * other event queue supplied as *priority* in rte_event_queue_setup() | |
1540 | * c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event | |
1541 | * queue selection from the list is based on event priority supplied as | |
1542 | * *priority* in rte_event_enqueue_burst() | |
1543 | * 2) Selection of event | |
1544 | * a) The number of flows available in selected event queue. | |
1545 | * b) Schedule type method associated with the event | |
1546 | * | |
1547 | * The *nb_events* parameter is the maximum number of event objects to dequeue | |
1548 | * which are returned in the *ev* array of *rte_event* structure. | |
1549 | * | |
1550 | * The rte_event_dequeue_burst() function returns the number of events objects | |
1551 | * it actually dequeued. A return value equal to *nb_events* means that all | |
1552 | * event objects have been dequeued. | |
1553 | * | |
1554 | * The number of events dequeued is the number of scheduler contexts held by | |
1555 | * this port. These contexts are automatically released in the next | |
1556 | * rte_event_dequeue_burst() invocation if the port supports implicit | |
1557 | * releases, or invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE | |
1558 | * operation can be used to release the contexts early. | |
1559 | * | |
1560 | * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be | |
1561 | * enqueued to the same port that their associated events were dequeued from. | |
1562 | * | |
1563 | * @param dev_id | |
1564 | * The identifier of the device. | |
1565 | * @param port_id | |
1566 | * The identifier of the event port. | |
1567 | * @param[out] ev | |
1568 | * Points to an array of *nb_events* objects of type *rte_event* structure | |
1569 | * for output to be populated with the dequeued event objects. | |
1570 | * @param nb_events | |
1571 | * The maximum number of event objects to dequeue, typically number of | |
1572 | * rte_event_port_dequeue_depth() available for this port. | |
1573 | * | |
1574 | * @param timeout_ticks | |
1575 | * - 0 no-wait, returns immediately if there is no event. | |
1576 | * - >0 wait for the event, if the device is configured with | |
1577 | * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until | |
1578 | * at least one event is available or *timeout_ticks* time. | |
1579 | * if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT | |
1580 | * then this function will wait until the event available or | |
1581 | * *dequeue_timeout_ns* ns which was previously supplied to | |
1582 | * rte_event_dev_configure() | |
1583 | * | |
1584 | * @return | |
1585 | * The number of event objects actually dequeued from the port. The return | |
1586 | * value can be less than the value of the *nb_events* parameter when the | |
1587 | * event port's queue is not full. | |
1588 | * | |
1589 | * @see rte_event_port_dequeue_depth() | |
1590 | */ | |
1591 | static inline uint16_t | |
1592 | rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], | |
1593 | uint16_t nb_events, uint64_t timeout_ticks) | |
1594 | { | |
1595 | struct rte_eventdev *dev = &rte_eventdevs[dev_id]; | |
1596 | ||
1597 | #ifdef RTE_LIBRTE_EVENTDEV_DEBUG | |
1598 | if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) { | |
1599 | rte_errno = -EINVAL; | |
1600 | return 0; | |
1601 | } | |
1602 | ||
1603 | if (port_id >= dev->data->nb_ports) { | |
1604 | rte_errno = -EINVAL; | |
1605 | return 0; | |
1606 | } | |
1607 | #endif | |
1608 | ||
1609 | /* | |
1610 | * Allow zero cost non burst mode routine invocation if application | |
1611 | * requests nb_events as const one | |
1612 | */ | |
1613 | if (nb_events == 1) | |
1614 | return (*dev->dequeue)( | |
1615 | dev->data->ports[port_id], ev, timeout_ticks); | |
1616 | else | |
1617 | return (*dev->dequeue_burst)( | |
1618 | dev->data->ports[port_id], ev, nb_events, | |
1619 | timeout_ticks); | |
1620 | } | |
1621 | ||
1622 | /** | |
1623 | * Link multiple source event queues supplied in *queues* to the destination | |
1624 | * event port designated by its *port_id* with associated service priority | |
1625 | * supplied in *priorities* on the event device designated by its *dev_id*. | |
1626 | * | |
1627 | * The link establishment shall enable the event port *port_id* from | |
1628 | * receiving events from the specified event queue(s) supplied in *queues* | |
1629 | * | |
1630 | * An event queue may link to one or more event ports. | |
1631 | * The number of links can be established from an event queue to event port is | |
1632 | * implementation defined. | |
1633 | * | |
1634 | * Event queue(s) to event port link establishment can be changed at runtime | |
1635 | * without re-configuring the device to support scaling and to reduce the | |
1636 | * latency of critical work by establishing the link with more event ports | |
1637 | * at runtime. | |
1638 | * | |
1639 | * @param dev_id | |
1640 | * The identifier of the device. | |
1641 | * | |
1642 | * @param port_id | |
1643 | * Event port identifier to select the destination port to link. | |
1644 | * | |
1645 | * @param queues | |
1646 | * Points to an array of *nb_links* event queues to be linked | |
1647 | * to the event port. | |
1648 | * NULL value is allowed, in which case this function links all the configured | |
1649 | * event queues *nb_event_queues* which previously supplied to | |
1650 | * rte_event_dev_configure() to the event port *port_id* | |
1651 | * | |
1652 | * @param priorities | |
1653 | * Points to an array of *nb_links* service priorities associated with each | |
1654 | * event queue link to event port. | |
1655 | * The priority defines the event port's servicing priority for | |
1656 | * event queue, which may be ignored by an implementation. | |
1657 | * The requested priority should in the range of | |
1658 | * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST]. | |
1659 | * The implementation shall normalize the requested priority to | |
1660 | * implementation supported priority value. | |
1661 | * NULL value is allowed, in which case this function links the event queues | |
1662 | * with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority | |
1663 | * | |
1664 | * @param nb_links | |
1665 | * The number of links to establish. This parameter is ignored if queues is | |
1666 | * NULL. | |
1667 | * | |
1668 | * @return | |
1669 | * The number of links actually established. The return value can be less than | |
1670 | * the value of the *nb_links* parameter when the implementation has the | |
1671 | * limitation on specific queue to port link establishment or if invalid | |
1672 | * parameters are specified in *queues* | |
1673 | * If the return value is less than *nb_links*, the remaining links at the end | |
1674 | * of link[] are not established, and the caller has to take care of them. | |
1675 | * If return value is less than *nb_links* then implementation shall update the | |
1676 | * rte_errno accordingly, Possible rte_errno values are | |
1677 | * (-EDQUOT) Quota exceeded(Application tried to link the queue configured with | |
1678 | * RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event ports) | |
1679 | * (-EINVAL) Invalid parameter | |
1680 | * | |
1681 | */ | |
1682 | int | |
1683 | rte_event_port_link(uint8_t dev_id, uint8_t port_id, | |
1684 | const uint8_t queues[], const uint8_t priorities[], | |
1685 | uint16_t nb_links); | |
1686 | ||
1687 | /** | |
1688 | * Unlink multiple source event queues supplied in *queues* from the destination | |
1689 | * event port designated by its *port_id* on the event device designated | |
1690 | * by its *dev_id*. | |
1691 | * | |
9f95a23c TL |
1692 | * The unlink call issues an async request to disable the event port *port_id* |
1693 | * from receiving events from the specified event queue *queue_id*. | |
11fdf7f2 TL |
1694 | * Event queue(s) to event port unlink establishment can be changed at runtime |
1695 | * without re-configuring the device. | |
1696 | * | |
9f95a23c TL |
1697 | * @see rte_event_port_unlinks_in_progress() to poll for completed unlinks. |
1698 | * | |
11fdf7f2 TL |
1699 | * @param dev_id |
1700 | * The identifier of the device. | |
1701 | * | |
1702 | * @param port_id | |
1703 | * Event port identifier to select the destination port to unlink. | |
1704 | * | |
1705 | * @param queues | |
1706 | * Points to an array of *nb_unlinks* event queues to be unlinked | |
1707 | * from the event port. | |
1708 | * NULL value is allowed, in which case this function unlinks all the | |
1709 | * event queue(s) from the event port *port_id*. | |
1710 | * | |
1711 | * @param nb_unlinks | |
1712 | * The number of unlinks to establish. This parameter is ignored if queues is | |
1713 | * NULL. | |
1714 | * | |
1715 | * @return | |
9f95a23c | 1716 | * The number of unlinks successfully requested. The return value can be less |
11fdf7f2 TL |
1717 | * than the value of the *nb_unlinks* parameter when the implementation has the |
1718 | * limitation on specific queue to port unlink establishment or | |
1719 | * if invalid parameters are specified. | |
1720 | * If the return value is less than *nb_unlinks*, the remaining queues at the | |
9f95a23c | 1721 | * end of queues[] are not unlinked, and the caller has to take care of them. |
11fdf7f2 TL |
1722 | * If return value is less than *nb_unlinks* then implementation shall update |
1723 | * the rte_errno accordingly, Possible rte_errno values are | |
1724 | * (-EINVAL) Invalid parameter | |
11fdf7f2 TL |
1725 | */ |
1726 | int | |
1727 | rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, | |
1728 | uint8_t queues[], uint16_t nb_unlinks); | |
1729 | ||
9f95a23c TL |
1730 | /** |
1731 | * Returns the number of unlinks in progress. | |
1732 | * | |
1733 | * This function provides the application with a method to detect when an | |
1734 | * unlink has been completed by the implementation. | |
1735 | * | |
1736 | * @see rte_event_port_unlink() to issue unlink requests. | |
1737 | * | |
1738 | * @param dev_id | |
1739 | * The identifier of the device. | |
1740 | * | |
1741 | * @param port_id | |
1742 | * Event port identifier to select port to check for unlinks in progress. | |
1743 | * | |
1744 | * @return | |
1745 | * The number of unlinks that are in progress. A return of zero indicates that | |
1746 | * there are no outstanding unlink requests. A positive return value indicates | |
1747 | * the number of unlinks that are in progress, but are not yet complete. | |
1748 | * A negative return value indicates an error, -EINVAL indicates an invalid | |
1749 | * parameter passed for *dev_id* or *port_id*. | |
1750 | */ | |
1751 | int | |
1752 | rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id); | |
1753 | ||
11fdf7f2 TL |
1754 | /** |
1755 | * Retrieve the list of source event queues and its associated service priority | |
1756 | * linked to the destination event port designated by its *port_id* | |
1757 | * on the event device designated by its *dev_id*. | |
1758 | * | |
1759 | * @param dev_id | |
1760 | * The identifier of the device. | |
1761 | * | |
1762 | * @param port_id | |
1763 | * Event port identifier. | |
1764 | * | |
1765 | * @param[out] queues | |
1766 | * Points to an array of *queues* for output. | |
1767 | * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to | |
1768 | * store the event queue(s) linked with event port *port_id* | |
1769 | * | |
1770 | * @param[out] priorities | |
1771 | * Points to an array of *priorities* for output. | |
1772 | * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to | |
1773 | * store the service priority associated with each event queue linked | |
1774 | * | |
1775 | * @return | |
1776 | * The number of links established on the event port designated by its | |
1777 | * *port_id*. | |
1778 | * - <0 on failure. | |
1779 | * | |
1780 | */ | |
1781 | int | |
1782 | rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, | |
1783 | uint8_t queues[], uint8_t priorities[]); | |
1784 | ||
1785 | /** | |
1786 | * Retrieve the service ID of the event dev. If the adapter doesn't use | |
1787 | * a rte_service function, this function returns -ESRCH. | |
1788 | * | |
1789 | * @param dev_id | |
1790 | * The identifier of the device. | |
1791 | * | |
1792 | * @param [out] service_id | |
1793 | * A pointer to a uint32_t, to be filled in with the service id. | |
1794 | * | |
1795 | * @return | |
1796 | * - 0: Success | |
1797 | * - <0: Error code on failure, if the event dev doesn't use a rte_service | |
1798 | * function, this function returns -ESRCH. | |
1799 | */ | |
1800 | int | |
1801 | rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id); | |
1802 | ||
1803 | /** | |
1804 | * Dump internal information about *dev_id* to the FILE* provided in *f*. | |
1805 | * | |
1806 | * @param dev_id | |
1807 | * The identifier of the device. | |
1808 | * | |
1809 | * @param f | |
1810 | * A pointer to a file for output | |
1811 | * | |
1812 | * @return | |
1813 | * - 0: on success | |
1814 | * - <0: on failure. | |
1815 | */ | |
1816 | int | |
1817 | rte_event_dev_dump(uint8_t dev_id, FILE *f); | |
1818 | ||
1819 | /** Maximum name length for extended statistics counters */ | |
1820 | #define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64 | |
1821 | ||
1822 | /** | |
1823 | * Selects the component of the eventdev to retrieve statistics from. | |
1824 | */ | |
enum rte_event_dev_xstats_mode {
	RTE_EVENT_DEV_XSTATS_DEVICE, /**< Device-wide statistics. */
	RTE_EVENT_DEV_XSTATS_PORT,   /**< Statistics of a single event port. */
	RTE_EVENT_DEV_XSTATS_QUEUE,  /**< Statistics of a single event queue. */
};
1830 | ||
1831 | /** | |
1832 | * A name-key lookup element for extended statistics. | |
1833 | * | |
1834 | * This structure is used to map between names and ID numbers | |
1835 | * for extended eventdev statistics. | |
1836 | */ | |
1837 | struct rte_event_dev_xstats_name { | |
1838 | char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE]; | |
1839 | }; | |
1840 | ||
1841 | /** | |
1842 | * Retrieve names of extended statistics of an event device. | |
1843 | * | |
1844 | * @param dev_id | |
1845 | * The identifier of the event device. | |
1846 | * @param mode | |
1847 | * The mode of statistics to retrieve. Choices include the device statistics, | |
1848 | * port statistics or queue statistics. | |
1849 | * @param queue_port_id | |
1850 | * Used to specify the port or queue number in queue or port mode, and is | |
1851 | * ignored in device mode. | |
1852 | * @param[out] xstats_names | |
1853 | * Block of memory to insert names into. Must be at least size in capacity. | |
1854 | * If set to NULL, function returns required capacity. | |
1855 | * @param[out] ids | |
1856 | * Block of memory to insert ids into. Must be at least size in capacity. | |
1857 | * If set to NULL, function returns required capacity. The id values returned | |
1858 | * can be passed to *rte_event_dev_xstats_get* to select statistics. | |
1859 | * @param size | |
1860 | * Capacity of xstats_names (number of names). | |
1861 | * @return | |
1862 | * - positive value lower or equal to size: success. The return value | |
1863 | * is the number of entries filled in the stats table. | |
1864 | * - positive value higher than size: error, the given statistics table | |
1865 | * is too small. The return value corresponds to the size that should | |
1866 | * be given to succeed. The entries in the table are not valid and | |
1867 | * shall not be used by the caller. | |
1868 | * - negative value on error: | |
1869 | * -ENODEV for invalid *dev_id* | |
1870 | * -EINVAL for invalid mode, queue port or id parameters | |
1871 | * -ENOTSUP if the device doesn't support this function. | |
1872 | */ | |
1873 | int | |
1874 | rte_event_dev_xstats_names_get(uint8_t dev_id, | |
1875 | enum rte_event_dev_xstats_mode mode, | |
1876 | uint8_t queue_port_id, | |
1877 | struct rte_event_dev_xstats_name *xstats_names, | |
1878 | unsigned int *ids, | |
1879 | unsigned int size); | |
1880 | ||
1881 | /** | |
1882 | * Retrieve extended statistics of an event device. | |
1883 | * | |
1884 | * @param dev_id | |
1885 | * The identifier of the device. | |
1886 | * @param mode | |
1887 | * The mode of statistics to retrieve. Choices include the device statistics, | |
1888 | * port statistics or queue statistics. | |
1889 | * @param queue_port_id | |
1890 | * Used to specify the port or queue number in queue or port mode, and is | |
1891 | * ignored in device mode. | |
1892 | * @param ids | |
1893 | * The id numbers of the stats to get. The ids can be got from the stat | |
1894 | * position in the stat list from rte_event_dev_get_xstats_names(), or | |
9f95a23c | 1895 | * by using rte_event_dev_xstats_by_name_get(). |
11fdf7f2 TL |
1896 | * @param[out] values |
1897 | * The values for each stats request by ID. | |
1898 | * @param n | |
1899 | * The number of stats requested | |
1900 | * @return | |
1901 | * - positive value: number of stat entries filled into the values array | |
1902 | * - negative value on error: | |
1903 | * -ENODEV for invalid *dev_id* | |
1904 | * -EINVAL for invalid mode, queue port or id parameters | |
1905 | * -ENOTSUP if the device doesn't support this function. | |
1906 | */ | |
1907 | int | |
1908 | rte_event_dev_xstats_get(uint8_t dev_id, | |
1909 | enum rte_event_dev_xstats_mode mode, | |
1910 | uint8_t queue_port_id, | |
1911 | const unsigned int ids[], | |
1912 | uint64_t values[], unsigned int n); | |
1913 | ||
1914 | /** | |
1915 | * Retrieve the value of a single stat by requesting it by name. | |
1916 | * | |
1917 | * @param dev_id | |
1918 | * The identifier of the device | |
1919 | * @param name | |
1920 | * The stat name to retrieve | |
1921 | * @param[out] id | |
1922 | * If non-NULL, the numerical id of the stat will be returned, so that further | |
9f95a23c | 1923 | * requests for the stat can be got using rte_event_dev_xstats_get, which will |
11fdf7f2 TL |
1924 | * be faster as it doesn't need to scan a list of names for the stat. |
1925 | * If the stat cannot be found, the id returned will be (unsigned)-1. | |
1926 | * @return | |
1927 | * - positive value or zero: the stat value | |
1928 | * - negative value: -EINVAL if stat not found, -ENOTSUP if not supported. | |
1929 | */ | |
1930 | uint64_t | |
1931 | rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name, | |
1932 | unsigned int *id); | |
1933 | ||
1934 | /** | |
1935 | * Reset the values of the xstats of the selected component in the device. | |
1936 | * | |
1937 | * @param dev_id | |
1938 | * The identifier of the device | |
1939 | * @param mode | |
1940 | * The mode of the statistics to reset. Choose from device, queue or port. | |
1941 | * @param queue_port_id | |
1942 | * The queue or port to reset. 0 and positive values select ports and queues, | |
1943 | * while -1 indicates all ports or queues. | |
1944 | * @param ids | |
1945 | * Selects specific statistics to be reset. When NULL, all statistics selected | |
1946 | * by *mode* will be reset. If non-NULL, must point to array of at least | |
1947 | * *nb_ids* size. | |
1948 | * @param nb_ids | |
1949 | * The number of ids available from the *ids* array. Ignored when ids is NULL. | |
1950 | * @return | |
1951 | * - zero: successfully reset the statistics to zero | |
1952 | * - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported. | |
1953 | */ | |
1954 | int | |
1955 | rte_event_dev_xstats_reset(uint8_t dev_id, | |
1956 | enum rte_event_dev_xstats_mode mode, | |
1957 | int16_t queue_port_id, | |
1958 | const uint32_t ids[], | |
1959 | uint32_t nb_ids); | |
1960 | ||
1961 | /** | |
1962 | * Trigger the eventdev self test. | |
1963 | * | |
1964 | * @param dev_id | |
1965 | * The identifier of the device | |
1966 | * @return | |
1967 | * - 0: Selftest successful | |
1968 | * - -ENOTSUP if the device doesn't support selftest | |
1969 | * - other values < 0 on failure. | |
1970 | */ | |
1971 | int rte_event_dev_selftest(uint8_t dev_id); | |
1972 | ||
1973 | #ifdef __cplusplus | |
1974 | } | |
1975 | #endif | |
1976 | ||
1977 | #endif /* _RTE_EVENTDEV_H_ */ |