/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _RTE_BBDEV_H_
#define _RTE_BBDEV_H_

/**
 * @file rte_bbdev.h
 *
 * Wireless base band device abstraction APIs.
 *
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice
 *
 * This API allows an application to discover, configure and use a device to
 * process operations. An asynchronous API (enqueue, followed by later dequeue)
 * is used for processing operations.
 *
 * The functions in this API are not thread-safe when called on the same
 * target object (a device, or a queue on a device), with the exception that
 * one thread can enqueue operations to a queue while another thread dequeues
 * from the same queue.
 */
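
/*
 * A minimal usage sketch (illustrative only, not part of the API): the
 * expected call order is queue setup -> queue configuration -> start ->
 * enqueue/dequeue -> stop -> close. The queue count and the use of the
 * driver's default queue configuration are example choices, and
 * SOCKET_ID_ANY comes from the EAL headers.
 *
 * @code
 * uint16_t dev_id = 0;
 * int ret;
 *
 * ret = rte_bbdev_setup_queues(dev_id, 1, SOCKET_ID_ANY);
 * if (ret == 0)
 * 	ret = rte_bbdev_queue_configure(dev_id, 0, NULL); // default conf
 * if (ret == 0)
 * 	ret = rte_bbdev_start(dev_id);
 * // ... enqueue to and dequeue from queue 0 ...
 * rte_bbdev_stop(dev_id);
 * rte_bbdev_close(dev_id);
 * @endcode
 */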

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#include <rte_compat.h>
#include <rte_atomic.h>
#include <rte_bus.h>
#include <rte_cpuflags.h>
#include <rte_memory.h>

#include "rte_bbdev_op.h"

#ifndef RTE_BBDEV_MAX_DEVS
#define RTE_BBDEV_MAX_DEVS 128 /**< Max number of devices */
#endif

/** Flags indicate current state of BBDEV device */
enum rte_bbdev_state {
	RTE_BBDEV_UNUSED,
	RTE_BBDEV_INITIALIZED
};

/**
 * Get the total number of devices that have been successfully initialised.
 *
 * @return
 *   The total number of usable devices.
 */
uint16_t __rte_experimental
rte_bbdev_count(void);

/**
 * Check if a device is valid.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   true if device ID is valid and device is attached, false otherwise.
 */
bool __rte_experimental
rte_bbdev_is_valid(uint16_t dev_id);

/**
 * Get the next enabled device.
 *
 * @param dev_id
 *   The current device
 *
 * @return
 *   - The next device, or
 *   - RTE_BBDEV_MAX_DEVS if none found
 */
uint16_t __rte_experimental
rte_bbdev_find_next(uint16_t dev_id);

/** Iterate through all enabled devices */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))
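
/*
 * Example (illustrative): iterate over all enabled bbdev devices and print
 * their names. Assumes <stdio.h>; error handling is omitted for brevity.
 *
 * @code
 * uint16_t i;
 * struct rte_bbdev_info info;
 *
 * RTE_BBDEV_FOREACH(i) {
 * 	if (rte_bbdev_info_get(i, &info) == 0)
 * 		printf("bbdev %u: %s\n", i, info.dev_name);
 * }
 * @endcode
 */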

/**
 * Set up device queues.
 * This function must be called on a device before configuring its queues and
 * starting the device. It can also be called when a device is in the stopped
 * state. If any device queues have been configured, their configuration will
 * be cleared by a call to this function.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param num_queues
 *   Number of queues to configure on device.
 * @param socket_id
 *   ID of a socket which will be used to allocate memory.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EINVAL if num_queues is invalid, 0 or greater than maximum
 *   - -EBUSY if the identified device has already started
 *   - -ENOMEM if unable to allocate memory
 */
int __rte_experimental
rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);

/**
 * Enable interrupts.
 * This function may be called before starting the device to enable the
 * interrupts if they are available.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - -ENODEV if dev_id is invalid or the device is corrupted
 *   - -EBUSY if the identified device has already started
 *   - -ENOTSUP if the interrupts are not supported by the device
 */
int __rte_experimental
rte_bbdev_intr_enable(uint16_t dev_id);

/** Device queue configuration structure */
struct rte_bbdev_queue_conf {
	int socket;  /**< NUMA socket used for memory allocation */
	uint32_t queue_size;  /**< Size of queue */
	uint8_t priority;  /**< Queue priority */
	bool deferred_start; /**< Do not start queue when device is started. */
	enum rte_bbdev_op_type op_type; /**< Operation type */
};

/**
 * Configure a queue on a device.
 * This function can be called after device configuration, and before starting.
 * It can also be called when the device or the queue is in the stopped state.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param conf
 *   The queue configuration. If NULL, a default configuration will be used.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if the identified queue size or priority are invalid
 *   - EBUSY if the identified queue or its device have already started
 */
int __rte_experimental
rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf);
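
/*
 * Example (illustrative): configure queue 0 with an explicit configuration
 * derived from the driver's defaults. The operation type chosen here
 * (RTE_BBDEV_OP_TURBO_ENC, declared in rte_bbdev_op.h) is only an example
 * value; dev_id is assumed to identify a valid, stopped device.
 *
 * @code
 * struct rte_bbdev_info info;
 * struct rte_bbdev_queue_conf conf;
 * int ret = rte_bbdev_info_get(dev_id, &info);
 *
 * if (ret == 0) {
 * 	conf = info.drv.default_queue_conf;
 * 	conf.socket = info.socket_id;
 * 	conf.op_type = RTE_BBDEV_OP_TURBO_ENC;
 * 	ret = rte_bbdev_queue_configure(dev_id, 0, &conf);
 * }
 * @endcode
 */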

/**
 * Start a device.
 * This is the last step needed before enqueuing operations is possible.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
int __rte_experimental
rte_bbdev_start(uint16_t dev_id);

/**
 * Stop a device.
 * The device can be reconfigured, and restarted after being stopped.
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
int __rte_experimental
rte_bbdev_stop(uint16_t dev_id);

/**
 * Close a device.
 * The device cannot be restarted without reconfiguration!
 *
 * @param dev_id
 *   The identifier of the device.
 *
 * @return
 *   - 0 on success
 */
int __rte_experimental
rte_bbdev_close(uint16_t dev_id);

/**
 * Start a specified queue on a device.
 * This is only needed if the queue has been stopped, or if the deferred_start
 * flag has been set when configuring the queue.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
int __rte_experimental
rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);

/**
 * Stop a specified queue on a device, to allow reconfiguration.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
int __rte_experimental
rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);

/** Device statistics. */
struct rte_bbdev_stats {
	uint64_t enqueued_count;  /**< Count of all operations enqueued */
	uint64_t dequeued_count;  /**< Count of all operations dequeued */
	/** Total error count on operations enqueued */
	uint64_t enqueue_err_count;
	/** Total error count on operations dequeued */
	uint64_t dequeue_err_count;
	/** CPU cycles consumed by the (HW/SW) accelerator device to offload
	 *  the enqueue request to its internal queues.
	 *  - For a HW device this is the cycles consumed in MMIO write
	 *  - For a SW (vdev) device, this is the processing time of the
	 *    bbdev operation
	 */
	uint64_t acc_offload_cycles;
};

/**
 * Retrieve the general I/O statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param stats
 *   Pointer to the structure into which the statistics will be copied. On
 *   error, this location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int __rte_experimental
rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
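
/*
 * Example (illustrative): read and print the device counters. Assumes
 * <stdio.h> and <inttypes.h> (for the PRIu64 format macros) are included.
 *
 * @code
 * struct rte_bbdev_stats stats;
 *
 * if (rte_bbdev_stats_get(dev_id, &stats) == 0)
 * 	printf("enq %" PRIu64 " (err %" PRIu64 "), deq %" PRIu64 " (err %" PRIu64 ")\n",
 * 			stats.enqueued_count, stats.enqueue_err_count,
 * 			stats.dequeued_count, stats.dequeue_err_count);
 * @endcode
 */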

/**
 * Reset the statistics of a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @return
 *   - 0 on success
 */
int __rte_experimental
rte_bbdev_stats_reset(uint16_t dev_id);

/** Device information supplied by the device's driver */
struct rte_bbdev_driver_info {
	/** Driver name */
	const char *driver_name;

	/** Maximum number of queues supported by the device */
	unsigned int max_num_queues;
	/** Queue size limit (queue size must also be power of 2) */
	uint32_t queue_size_lim;
	/** Set if device off-loads operation to hardware */
	bool hardware_accelerated;
	/** Max value supported by queue priority for DL */
	uint8_t max_dl_queue_priority;
	/** Max value supported by queue priority for UL */
	uint8_t max_ul_queue_priority;
	/** Set if device supports per-queue interrupts */
	bool queue_intr_supported;
	/** Minimum alignment of buffers, in bytes */
	uint16_t min_alignment;
	/** Default queue configuration used if none is supplied */
	struct rte_bbdev_queue_conf default_queue_conf;
	/** Device operation capabilities */
	const struct rte_bbdev_op_cap *capabilities;
	/** Device cpu_flag requirements */
	const enum rte_cpu_flag_t *cpu_flag_reqs;
};

/** Macro used at end of bbdev PMD list */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }

/**
 * Device information structure used by an application to discover a device's
 * capabilities and current configuration
 */
struct rte_bbdev_info {
	int socket_id;  /**< NUMA socket that device is on */
	const char *dev_name;  /**< Unique device name */
	const struct rte_bus *bus;  /**< Bus information */
	uint16_t num_queues;  /**< Number of queues currently configured */
	bool started;  /**< Set if device is currently started */
	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
};

/**
 * Retrieve information about a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param dev_info
 *   Pointer to the structure into which the information will be copied. On
 *   error, this location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int __rte_experimental
rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);
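
/*
 * Example (illustrative): query a device and walk its capability list, which
 * is terminated by an entry of type RTE_BBDEV_OP_NONE (see
 * RTE_BBDEV_END_OF_CAPABILITIES_LIST above). The rte_bbdev_op_cap structure
 * is declared in rte_bbdev_op.h.
 *
 * @code
 * struct rte_bbdev_info info;
 * const struct rte_bbdev_op_cap *cap;
 *
 * if (rte_bbdev_info_get(dev_id, &info) == 0) {
 * 	for (cap = info.drv.capabilities;
 * 			cap->type != RTE_BBDEV_OP_NONE; cap++) {
 * 		// cap->type identifies one supported operation type
 * 	}
 * }
 * @endcode
 */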

/** Queue information */
struct rte_bbdev_queue_info {
	/** Current device configuration */
	struct rte_bbdev_queue_conf conf;
	/** Set if queue is currently started */
	bool started;
};

/**
 * Retrieve information about a specific queue on a device.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param queue_info
 *   Pointer to the structure into which the information will be copied. On
 *   error, this location may or may not have been modified.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 */
int __rte_experimental
rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info);

/** @internal The data structure associated with each queue of a device. */
struct rte_bbdev_queue_data {
	void *queue_private;  /**< Driver-specific per-queue data */
	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
	bool started;  /**< Queue state */
};

/** @internal Enqueue encode operations for processing on a queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops,
		uint16_t num);

/** @internal Enqueue decode operations for processing on a queue of a device. */
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops,
		uint16_t num);

/** @internal Dequeue encode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num);

/** @internal Dequeue decode operations from a queue of a device. */
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(
		struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num);

#define RTE_BBDEV_NAME_MAX_LEN 64 /**< Max length of device name */

/**
 * @internal The data associated with a device, with no function pointers.
 * This structure is safe to place in shared memory to be common among
 * different processes in a multi-process configuration. Drivers can access
 * these fields, but should never write to them!
 */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
	void *dev_private;  /**< Driver-specific private data */
	uint16_t num_queues;  /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
	uint16_t dev_id;  /**< Device ID */
	int socket_id;  /**< NUMA socket that device is on */
	bool started;  /**< Device run-time state */
	/** Counter of processes using the device */
	rte_atomic16_t process_cnt;
};

/* Forward declarations */
struct rte_bbdev_ops;
struct rte_bbdev_callback;
struct rte_intr_handle;

/** Structure to keep track of registered callbacks */
TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);

/**
 * @internal The data structure associated with a device. Drivers can access
 * these fields, but should only write to the *_ops fields.
 */
struct __rte_cache_aligned rte_bbdev {
	/** Enqueue encode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;
	/** Enqueue decode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;
	/** Dequeue encode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;
	/** Dequeue decode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;
	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
	struct rte_bbdev_data *data;  /**< Pointer to device data */
	enum rte_bbdev_state state;  /**< If device is currently used or not */
	struct rte_device *device;  /**< Backing device */
	/** User application callback for interrupts if present */
	struct rte_bbdev_cb_list list_cbs;
	struct rte_intr_handle *intr_handle;  /**< Device interrupt handle */
};

/** @internal array of all devices */
extern struct rte_bbdev rte_bbdev_devices[];

/**
 * Enqueue a burst of processed encode operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t __rte_experimental
rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_enc_ops(q_data, ops, num_ops);
}
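
/*
 * Example (illustrative): enqueue a burst of encode operations, retrying
 * until the whole burst has been accepted. The ops array is assumed to hold
 * burst_size operations already allocated and filled in by the caller (op
 * allocation helpers live in rte_bbdev_op.h); dev_id and queue_id identify a
 * started device and queue.
 *
 * @code
 * uint16_t sent = 0;
 *
 * while (sent < burst_size)
 * 	sent += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
 * 			&ops[sent], burst_size - sent);
 * @endcode
 */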

/**
 * Enqueue a burst of processed decode operations to a queue of the device.
 * This function only enqueues as many operations as currently possible and
 * does not block until @p num_ops entries in the queue are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array containing operations to be enqueued. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to enqueue.
 *
 * @return
 *   The number of operations actually enqueued (this is the number of processed
 *   entries in the @p ops array).
 */
static inline uint16_t __rte_experimental
rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_dec_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed encode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t __rte_experimental
rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_enc_ops(q_data, ops, num_ops);
}

/**
 * Dequeue a burst of processed decode operations from a queue of the device.
 * This function returns only the current contents of the queue, and does not
 * block until @p num_ops operations are available.
 * This function does not provide any error notification to avoid the
 * corresponding overhead.
 *
 * @param dev_id
 *   The identifier of the device.
 * @param queue_id
 *   The index of the queue.
 * @param ops
 *   Pointer array where operations will be dequeued to. Must have at least
 *   @p num_ops entries.
 * @param num_ops
 *   The maximum number of operations to dequeue.
 *
 * @return
 *   The number of operations actually dequeued (this is the number of entries
 *   copied into the @p ops array).
 */
static inline uint16_t __rte_experimental
rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_dec_ops(q_data, ops, num_ops);
}
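
/*
 * Example (illustrative): poll a queue until all previously enqueued decode
 * operations have come back. deq_ops and num_to_deq are placeholder names for
 * the caller's result array and the number of operations expected; the status
 * field checked at the end is part of struct rte_bbdev_dec_op (see
 * rte_bbdev_op.h).
 *
 * @code
 * uint16_t deq = 0;
 *
 * while (deq < num_to_deq)
 * 	deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
 * 			&deq_ops[deq], num_to_deq - deq);
 * // deq_ops[0..deq-1] now hold completed operations; check deq_ops[i]->status
 * @endcode
 */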

/** Definitions of device event types */
enum rte_bbdev_event_type {
	RTE_BBDEV_EVENT_UNKNOWN,  /**< unknown event type */
	RTE_BBDEV_EVENT_ERROR,  /**< error interrupt event */
	RTE_BBDEV_EVENT_DEQUEUE,  /**< dequeue event */
	RTE_BBDEV_EVENT_MAX  /**< max value of this enum */
};

/**
 * Typedef for the application callback function registered by application
 * software for notification of device events.
 *
 * @param dev_id
 *   Device identifier
 * @param event
 *   Device event to register for notification of.
 * @param cb_arg
 *   User specified parameter to be passed to user's callback function.
 * @param ret_param
 *   To pass data back to user application.
 */
typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param);

/**
 * Register a callback function for a specific device id. Multiple callbacks
 * can be added and will be called in the order they are added when an event
 * is triggered. Callbacks are called in a separate thread created by the DPDK
 * EAL.
 *
 * @param dev_id
 *   Device id.
 * @param event
 *   The event that the callback will be registered for.
 * @param cb_fn
 *   User supplied callback function to be called.
 * @param cb_arg
 *   Pointer to parameter that will be passed to the callback.
 *
 * @return
 *   Zero on success, negative value on failure.
 */
int __rte_experimental
rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);
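
/*
 * Example (illustrative): register a callback for error interrupts. The
 * callback body and its arguments are application-defined placeholders.
 *
 * @code
 * static void
 * error_cb(uint16_t dev_id, enum rte_bbdev_event_type event, void *cb_arg,
 * 		void *ret_param)
 * {
 * 	// handle RTE_BBDEV_EVENT_ERROR raised for dev_id
 * }
 *
 * ...
 * rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR, error_cb, NULL);
 * @endcode
 */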

/**
 * Unregister a callback function for a specific device id.
 *
 * @param dev_id
 *   The device identifier.
 * @param event
 *   The event that the callback will be unregistered for.
 * @param cb_fn
 *   User supplied callback function to be unregistered.
 * @param cb_arg
 *   Pointer to the parameter supplied when registering the callback.
 *   (void *)-1 means to remove all registered callbacks with the specified
 *   function address.
 *
 * @return
 *   - 0 on success
 *   - EINVAL if invalid parameter pointer is provided
 *   - EAGAIN if the provided callback pointer does not exist
 */
int __rte_experimental
rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);

/**
 * Enable a one-shot interrupt on the next operation enqueued to a particular
 * queue. The interrupt will be triggered when the operation is ready to be
 * dequeued. To handle the interrupt, an epoll file descriptor must be
 * registered using rte_bbdev_queue_intr_ctl(), and then an application
 * thread/lcore can wait for the interrupt using rte_epoll_wait().
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
int __rte_experimental
rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);

/**
 * Disable a one-shot interrupt on the next operation enqueued to a particular
 * queue (if it has been enabled).
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 *
 * @return
 *   - 0 on success
 *   - negative value on failure - as returned from PMD driver
 */
int __rte_experimental
rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);

/**
 * Control interface for per-queue interrupts.
 *
 * @param dev_id
 *   The device identifier.
 * @param queue_id
 *   The index of the queue.
 * @param epfd
 *   Epoll file descriptor that will be associated with the interrupt source.
 *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
 *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
 *   be used when calling rte_epoll_wait()).
 * @param op
 *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
 *   RTE_INTR_EVENT_DEL.
 * @param data
 *   User context, that will be returned in the epdata.data field of the
 *   rte_epoll_event structure filled in by rte_epoll_wait().
 *
 * @return
 *   - 0 on success
 *   - ENOTSUP if interrupts are not supported by the identified device
 *   - negative value on failure - as returned from PMD driver
 */
int __rte_experimental
rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data);
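
/*
 * Example (illustrative): interrupt-driven dequeue flow, as described above.
 * RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, struct rte_epoll_event and
 * rte_epoll_wait() come from the EAL interrupt API; this is a sketch of the
 * flow, not a complete application, and error handling is abbreviated.
 *
 * @code
 * struct rte_epoll_event ev;
 * int ret = rte_bbdev_queue_intr_ctl(dev_id, queue_id,
 * 		RTE_EPOLL_PER_THREAD, RTE_INTR_EVENT_ADD, NULL);
 *
 * if (ret == 0) {
 * 	rte_bbdev_queue_intr_enable(dev_id, queue_id);
 * 	// ... enqueue operations to the queue ...
 * 	if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) > 0) {
 * 		// at least one operation is now ready to be dequeued
 * 	}
 * }
 * @endcode
 */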

#ifdef __cplusplus
}
#endif

#endif /* _RTE_BBDEV_H_ */