/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and its offset in stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
		sizeof(rte_txq_stats_strings[0]))
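
/*
 * Illustrative sketch (not part of the upstream file): the tables above pair
 * a display name with a byte offset into struct rte_eth_stats, so a generic
 * counter can be fetched without per-field code. The helper name below is
 * hypothetical.
 */
static inline uint64_t
example_basic_stat_value(const struct rte_eth_stats *stats, unsigned idx)
{
	/* RTE_PTR_ADD does byte-wise pointer arithmetic; the offset stored
	 * in rte_stats_strings[idx] selects the matching counter. */
	return *(const uint64_t *)RTE_PTR_ADD(stats,
			rte_stats_strings[idx].offset);
}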
/**
 * The user application callback description.
 *
 * It contains callback address to be registered by user application,
 * the pointer to the parameters for callback, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

static void
rte_eth_dev_data_alloc(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
	if (mz == NULL)
		rte_panic("Cannot allocate memzone for ethernet port data\n");

	rte_eth_dev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_eth_dev_data, 0,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].attached == DEV_DETACHED)
			return i;
	}
	return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint8_t port_id;
	struct rte_eth_dev *eth_dev;

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
		return NULL;
	}

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	if (rte_eth_dev_allocated(name) != NULL) {
		RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
				name);
		return NULL;
	}

	eth_dev = &rte_eth_devices[port_id];
	eth_dev->data = &rte_eth_dev_data[port_id];
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->attached = DEV_ATTACHED;
	eth_dev_last_created_port = port_id;
	nb_ports++;
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev->attached = DEV_DETACHED;
	nb_ports--;
	return 0;
}
int
rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
		      struct rte_pci_device *pci_dev)
{
	struct eth_driver *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int diag;

	eth_drv = (struct eth_driver *)pci_drv;

	rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
			sizeof(ethdev_name));

	eth_dev = rte_eth_dev_allocate(ethdev_name);
	if (eth_dev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
				eth_drv->dev_private_size,
				RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private port data\n");
	}
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = eth_drv;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* init user callbacks */
	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	/*
	 * Set the default MTU.
	 */
	eth_dev->data->mtu = ETHER_MTU;

	/* Invoke PMD device initialization function */
	diag = (*eth_drv->eth_dev_init)(eth_dev);
	if (diag == 0)
		return 0;

	RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
			pci_drv->driver.name,
			(unsigned) pci_dev->id.vendor_id,
			(unsigned) pci_dev->id.device_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

int
rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	const struct eth_driver *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
			sizeof(ethdev_name));

	eth_dev = rte_eth_dev_allocated(ethdev_name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_drv = (const struct eth_driver *)pci_dev->driver;

	/* Invoke PMD device uninit function */
	if (*eth_drv->eth_dev_uninit) {
		ret = (*eth_drv->eth_dev_uninit)(eth_dev);
		if (ret)
			return ret;
	}

	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	eth_dev->pci_dev = NULL;
	eth_dev->driver = NULL;
	eth_dev->data = NULL;

	return 0;
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    rte_eth_devices[port_id].attached != DEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
	return nb_ports;
}

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* shouldn't check 'rte_eth_devices[i].data',
	 * because it might be overwritten by VDEV PMD */
	tmp = rte_eth_dev_data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
	int i;

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	*port_id = RTE_MAX_ETHPORTS;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (!strncmp(name,
			rte_eth_dev_data[i].name, strlen(name))) {

			*port_id = i;

			return 0;
		}
	}
	return -ENODEV;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
	uint32_t dev_flags;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	switch (rte_eth_devices[port_id].data->kdrv) {
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
	case RTE_KDRV_NIC_UIO:
	case RTE_KDRV_NONE:
		break;
	default:
		return -ENOTSUP;
	}
	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
		(!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
		return 0;
	else
		return 1;
}

/* attach the new device, then store port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
	int ret = -1;
	int current = rte_eth_dev_count();
	char *name = NULL;
	char *args = NULL;

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs, then retrieve device name and args */
	if (rte_eal_parse_devargs_str(devargs, &name, &args))
		goto err;

	ret = rte_eal_dev_attach(name, args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count()) {
		RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 */
	if (current == rte_eth_dev_count()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(name);
	free(args);
	return ret;
}

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
	int ret = -1;

	if (name == NULL) {
		ret = -EINVAL;
		goto err;
	}

	/* FIXME: move this to eal, once device flags are relocated there */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
		 "%s", rte_eth_devices[port_id].data->name);
	ret = rte_eal_dev_detach(name);
	if (ret < 0)
		goto err;

	return 0;

err:
	return ret;
}
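
/*
 * Illustrative usage sketch (hypothetical helper, not part of the upstream
 * file): hot-plug a port from a devargs string and detach it again. The
 * devargs value is a placeholder; error handling is trimmed.
 */
static __rte_unused int
example_hotplug_roundtrip(void)
{
	uint8_t port_id;
	char name[RTE_ETH_NAME_MAX_LEN];

	if (rte_eth_dev_attach("net_null0", &port_id) < 0) /* hypothetical */
		return -1;
	/* ... use the port ... */
	if (rte_eth_dev_detach(port_id, name) < 0)
		return -1;
	return 0;
}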
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -ENOMEM;
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}
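
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * a queue configured with rx_deferred_start in rte_eth_rxconf is left
 * stopped by rte_eth_dev_start() and must be kicked explicitly, e.g.:
 */
static __rte_unused int
example_start_deferred_rxq(uint8_t port_id, uint16_t queue_id)
{
	int ret = rte_eth_dev_rx_queue_start(port_id, queue_id);

	if (ret != 0)
		RTE_LOG(ERR, EAL, "cannot start rxq %u on port %u: %d\n",
			queue_id, port_id, ret);
	return ret;
}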
static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
				sizeof(dev->data->tx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -ENOMEM;
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
				sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}
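
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * translate a numeric speed into the link_speeds bitmap consumed by
 * rte_eth_conf, pinning the link to that single speed.
 */
static __rte_unused uint32_t
example_fixed_link_speeds(uint32_t speed_num)
{
	uint32_t flag = rte_eth_speed_bitflag(speed_num, 1 /* full duplex */);

	return flag ? (ETH_LINK_SPEED_FIXED | flag) : ETH_LINK_SPEED_AUTONEG;
}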
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_rx_q == 0 && nb_tx_q == 0) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
		return -EINVAL;
	}

	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if ((dev_conf->intr_conf.lsc == 1) &&
		(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
		RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
				dev->data->drv_name);
		return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
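
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * the smallest useful configuration is one RX and one TX queue with a
 * zeroed rte_eth_conf; every field left at zero selects the driver defaults
 * validated above.
 */
static __rte_unused int
example_configure_port(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN; /* non-jumbo default */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}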
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has a valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
				"should be: <= %hu, >= %hu, and a multiple of %hu\n",
				nb_tx_desc,
				dev_info.tx_desc_lim.nb_max,
				dev_info.tx_desc_lim.nb_min,
				dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
					       socket_id, tx_conf);
}
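
/*
 * Illustrative end-to-end sketch (hypothetical helper, not part of the
 * upstream file): configure one RX/TX queue pair, then start the port.
 * Assumes the caller already created "mp", an rte_mempool of mbufs.
 */
static __rte_unused int
example_port_init(uint8_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	/* NULL rx_conf/tx_conf selects dev_info.default_rxconf/txconf */
	ret = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
				     NULL, mp);
	if (ret != 0)
		return ret;
	ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}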
void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}
void
rte_eth_promiscuous_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}

static int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;
}
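
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * poll the basic counters of a port and print the RX/TX packet totals.
 */
static __rte_unused void
example_print_basic_stats(uint8_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;
	printf("port %u: rx %" PRIu64 " tx %" PRIu64 " rx_errors %" PRIu64 "\n",
	       port_id, stats.ipackets, stats.opackets, stats.ierrors);
}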
static int
get_xstats_count(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return count;
	} else
		count = 0;
	count += RTE_NB_STATS;
	count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
	count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
	return count;
}

int
rte_eth_xstats_get_names(uint8_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;
	uint32_t idx, id_queue;

	cnt_expected_entries = get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in get_xstats_count() */
	dev = &rte_eth_devices[port_id];
	cnt_used_entries = 0;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		snprintf(xstats_names[cnt_used_entries].name,
			sizeof(xstats_names[0].name),
			"%s", rte_stats_strings[idx].name);
		cnt_used_entries++;
	}
	for (id_queue = 0; id_queue < dev->data->nb_rx_queues; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u%s",
				id_queue, rte_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	for (id_queue = 0; id_queue < dev->data->nb_tx_queues; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u%s",
				id_queue, rte_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to end of list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return cnt_driver_entries;
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
	unsigned n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned count = 0, i, q;
	int xcount = 0;
	uint64_t val, *stats_ptr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	/* Return generic statistics */
	count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
		(dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				     xstats ? xstats + count : NULL,
				     (n > count) ? n - count : 0);
		if (xcount < 0)
			return xcount;
	}

	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	count = 0;
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	for (i = 0; i < count + xcount; i++)
		xstats[i].id = i;

	return count + xcount;
}
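
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * the canonical two-pass pattern — call once with NULL to size the arrays,
 * then again to fill them.
 */
static __rte_unused void
example_dump_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;
	int n, i;

	n = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (n <= 0)
		return;
	names = malloc(n * sizeof(*names));
	values = malloc(n * sizeof(*values));
	if (names != NULL && values != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, values, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, values[i].value);
	}
	free(names);
	free(values);
}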
/* reset ethdev extended statistics */
void
rte_eth_xstats_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_reset != NULL) {
		(*dev->dev_ops->xstats_reset)(dev);
		return;
	}

	/* fallback to default */
	rte_eth_stats_reset(port_id);
}

static int
set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
		uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
	return (*dev->dev_ops->queue_stats_mapping_set)
			(dev, queue_id, stat_idx, is_rx);
}

int
rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
			STAT_QMAP_TX);
}

int
rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
			STAT_QMAP_RX);
}

void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	};

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->pci_dev = dev->pci_dev;
	dev_info->driver_name = dev->data->drv_name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}

int
rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
				 uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_ptypes;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);

	if (!all_ptypes)
		return 0;

	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
		if (all_ptypes[i] & ptype_mask) {
			if (j < num)
				ptypes[j] = all_ptypes[i];
			j++;
		}

	return j;
}
void
rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}

int
rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	*mtu = dev->data->mtu;
	return 0;
}

int
rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (!ret)
		dev->data->mtu = mtu;

	return ret;
}

int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
		RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
		return -ENOSYS;
	}

	if (vlan_id > 4095) {
		RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return -EINVAL;
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);

	return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
}

int
rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

	return 0;
}

int
rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
				enum rte_vlan_type vlan_type,
				uint16_t tpid)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);

	return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
}

int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/*check which option changed by application*/
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/*no change*/
	if (mask == 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}

int
rte_eth_dev_get_vlan_offload(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int ret = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
		ret |= ETH_VLAN_STRIP_OFFLOAD;

	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
		ret |= ETH_VLAN_FILTER_OFFLOAD;

	if (dev->data->dev_conf.rxmode.hw_vlan_extend)
		ret |= ETH_VLAN_EXTEND_OFFLOAD;

	return ret;
}

int
rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
	(*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);

	return 0;
}
int
rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
	memset(fc_conf, 0, sizeof(*fc_conf));
	return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
}

int
rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
	return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
}

int
rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	/* High water, low water validation are device specific */
	if (*dev->dev_ops->priority_flow_ctrl_set)
		return (*dev->dev_ops->priority_flow_ctrl_set)
			(dev, pfc_conf);
	return -ENOTSUP;
}

static int
rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, num;

	if (!reta_conf)
		return -EINVAL;

	if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
		RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
							RTE_RETA_GROUP_SIZE);
		return -EINVAL;
	}

	num = reta_size / RTE_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}

static int
rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size,
			 uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (!reta_conf)
		return -EINVAL;

	if (max_rxq == 0) {
		RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) &&
			(reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
				"the maximum rxq index: %u\n", idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_dev_rss_reta_update(uint8_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	/* Check entry value */
	ret = rte_eth_check_reta_entry(reta_conf, reta_size,
				dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
}

int
rte_eth_dev_rss_reta_query(uint8_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
	return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
}
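
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream file):
 * spread a redirection table of reta_size entries round-robin over
 * nb_queues RX queues, then program it. Each rte_eth_rss_reta_entry64
 * covers RTE_RETA_GROUP_SIZE (64) consecutive table slots selected by its
 * mask.
 */
static __rte_unused int
example_reta_round_robin(uint8_t port_id, uint16_t reta_size,
			 uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[8]; /* up to 512 entries */
	uint16_t i;

	if (reta_size > 8 * RTE_RETA_GROUP_SIZE || nb_queues == 0)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
				(1ULL << (i % RTE_RETA_GROUP_SIZE));
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
				i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}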
int
rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;
	uint16_t rss_hash_protos;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rss_hash_protos = rss_conf->rss_hf;
	if ((rss_hash_protos != 0) &&
	    ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
				rss_hash_protos);
		return -EINVAL;
	}
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
	return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
}

int
rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
			      struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
	return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
}

int
rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
				struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
	return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
}

int
rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
				   struct rte_eth_udp_tunnel *udp_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (udp_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
		return -EINVAL;
	}

	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
	return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
}

int
rte_eth_led_on(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
	return (*dev->dev_ops->dev_led_on)(dev);
}

int
rte_eth_led_off(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
	return (*dev->dev_ops->dev_led_off)(dev);
}
/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);

	for (i = 0; i < dev_info.max_mac_addrs; i++)
		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

static const struct ether_addr null_mac_addr;

int
rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
			uint32_t pool)
{
	struct rte_eth_dev *dev;
	int index;
	uint64_t pool_mask;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);

	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}
	if (pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
		return -EINVAL;
	}

	index = get_mac_addr_index(port_id, addr);
	if (index < 0) {
		index = get_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
				port_id);
			return -ENOSPC;
		}
	} else {
		pool_mask = dev->data->mac_pool_sel[index];

		/* Check if both MAC address and pool is already there, and do nothing */
		if (pool_mask & (1ULL << pool))
			return 0;
	}

	/* Update NIC */
	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);

	/* Update address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[index]);

	/* Update pool bitmap in NIC data structure */
	dev->data->mac_pool_sel[index] |= (1ULL << pool);

	return 0;
}

int
rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;
	int index;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);

	index = get_mac_addr_index(port_id, addr);
	if (index == 0) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
		return -EADDRINUSE;
	} else if (index < 0)
		return 0;  /* Do nothing if address wasn't found */

	/* Update NIC */
	(*dev->dev_ops->mac_addr_remove)(dev, index);

	/* Update address in NIC data structure */
	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);

	/* reset pool bitmap */
	dev->data->mac_pool_sel[index] = 0;

	return 0;
}

int
rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (!is_valid_assigned_ether_addr(addr))
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);

	/* Update default address in NIC data structure */
	ether_addr_copy(addr, &dev->data->mac_addrs[0]);

	(*dev->dev_ops->mac_addr_set)(dev, addr);

	return 0;
}
int
rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
			  uint16_t rx_mode, uint8_t on)
{
	uint16_t num_vfs;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);

	num_vfs = dev_info.max_vfs;
	if (vf > num_vfs) {
		RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
		return -EINVAL;
	}

	if (rx_mode == 0) {
		RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask can not be zero\n");
		return -EINVAL;
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
	return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
}

/*
 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
 * an empty spot.
 */
static int
get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	unsigned i;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!dev->data->hash_mac_addrs)
		return -1;

	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
			ETHER_ADDR_LEN) == 0)
			return i;

	return -1;
}

int
rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
				uint8_t on)
{
	int index;
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (is_zero_ether_addr(addr)) {
		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
			port_id);
		return -EINVAL;
	}

	index = get_hash_mac_addr_index(port_id, addr);
	/* Check if it's already there, and do nothing */
	if ((index >= 0) && (on))
		return 0;

	if (index < 0) {
		if (!on) {
			RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
				"set in UTA\n", port_id);
			return -EINVAL;
		}

		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
		if (index < 0) {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
					port_id);
			return -ENOSPC;
		}
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
	if (ret == 0) {
		/* Update address in NIC data structure */
		if (on)
			ether_addr_copy(addr,
					&dev->data->hash_mac_addrs[index]);
		else
			ether_addr_copy(&null_mac_addr,
					&dev->data->hash_mac_addrs[index]);
	}

	return ret;
}

int
rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
	return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
}

int
rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
{
	uint16_t num_vfs;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);

	num_vfs = dev_info.max_vfs;
	if (vf > num_vfs) {
		RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
	return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
}

int
rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
{
	uint16_t num_vfs;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);

	num_vfs = dev_info.max_vfs;
	if (vf > num_vfs) {
		RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
	return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
}

int
rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
			       uint64_t vf_mask, uint8_t vlan_on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	if (vlan_id > ETHER_MAX_VLAN_ID) {
		RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
			vlan_id);
		return -EINVAL;
	}

	if (vf_mask == 0) {
		RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
	return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
						   vf_mask, vlan_on);
}
2313 int rte_eth_set_queue_rate_limit(uint8_t port_id
, uint16_t queue_idx
,
2316 struct rte_eth_dev
*dev
;
2317 struct rte_eth_dev_info dev_info
;
2318 struct rte_eth_link link
;
2320 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id
, -ENODEV
);
2322 dev
= &rte_eth_devices
[port_id
];
2323 rte_eth_dev_info_get(port_id
, &dev_info
);
2324 link
= dev
->data
->dev_link
;
2326 if (queue_idx
> dev_info
.max_tx_queues
) {
2327 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2328 "invalid queue id=%d\n", port_id
, queue_idx
);
2332 if (tx_rate
> link
.link_speed
) {
2333 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2334 "bigger than link speed= %d\n",
2335 tx_rate
, link
.link_speed
);
2339 RTE_FUNC_PTR_OR_ERR_RET(*dev
->dev_ops
->set_queue_rate_limit
, -ENOTSUP
);
2340 return (*dev
->dev_ops
->set_queue_rate_limit
)(dev
, queue_idx
, tx_rate
);
int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
				uint64_t q_msk)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;

	if (q_msk == 0)
		return 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);
	link = dev->data->dev_link;

	if (vf > dev_info.max_vfs) {
		RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
				"invalid vf id=%d\n", port_id, vf);
		return -EINVAL;
	}

	if (tx_rate > link.link_speed) {
		RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
				"bigger than link speed= %d\n",
				tx_rate, link.link_speed);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
	return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
}
int
rte_eth_mirror_rule_set(uint8_t port_id,
			struct rte_eth_mirror_conf *mirror_conf,
			uint8_t rule_id, uint8_t on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (mirror_conf->rule_type == 0) {
		RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
		return -EINVAL;
	}

	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
		RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
				ETH_64_POOLS - 1);
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
	    (mirror_conf->pool_mask == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
		return -EINVAL;
	}

	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
	    mirror_conf->vlan.vlan_mask == 0) {
		RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);

	return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
}
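/*
 * Example (illustrative sketch): mirror traffic tagged with VLAN 12 into
 * pool 1 as mirror rule 0. Field values are hypothetical; see
 * struct rte_eth_mirror_conf for the full layout.
 *
 *	struct rte_eth_mirror_conf conf = {
 *		.rule_type = ETH_MIRROR_VLAN,
 *		.dst_pool = 1,
 *	};
 *	conf.vlan.vlan_mask = 1ULL << 0;
 *	conf.vlan.vlan_id[0] = 12;
 *
 *	if (rte_eth_mirror_rule_set(port_id, &conf, 0, 1) != 0)
 *		printf("mirror rule not installed\n");
 */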
int
rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);

	return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
}
int
rte_eth_dev_callback_register(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *user_cb;

	if (!cb_fn)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
		if (user_cb->cb_fn == cb_fn &&
			user_cb->cb_arg == cb_arg &&
			user_cb->event == event) {
			break;
		}
	}

	/* create a new callback. */
	if (user_cb == NULL) {
		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
					sizeof(struct rte_eth_dev_callback), 0);
		if (user_cb != NULL) {
			user_cb->cb_fn = cb_fn;
			user_cb->cb_arg = cb_arg;
			user_cb->event = event;
			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return (user_cb == NULL) ? -ENOMEM : 0;
}
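/*
 * Example (illustrative sketch): be notified of link state changes. The
 * callback runs in the EAL interrupt thread, so it should stay short.
 *
 *	static void
 *	lsc_cb(uint8_t port_id, enum rte_eth_event_type event __rte_unused,
 *	       void *arg __rte_unused)
 *	{
 *		struct rte_eth_link link;
 *
 *		rte_eth_link_get_nowait(port_id, &link);
 *		printf("port %u link %s\n", port_id,
 *		       link.link_status ? "up" : "down");
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_cb, NULL);
 */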
int
rte_eth_dev_callback_unregister(uint8_t port_id,
			enum rte_eth_event_type event,
			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
	int ret;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_callback *cb, *next;

	if (!cb_fn)
		return -EINVAL;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	rte_spinlock_lock(&rte_eth_dev_cb_lock);

	ret = 0;
	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {

		next = TAILQ_NEXT(cb, next);

		if (cb->cb_fn != cb_fn || cb->event != event ||
				(cb->cb_arg != (void *)-1 &&
				cb->cb_arg != cb_arg))
			continue;

		/*
		 * if this callback is not executing right now,
		 * then remove it.
		 */
		if (cb->active == 0) {
			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
			rte_free(cb);
		} else {
			ret = -EAGAIN;
		}
	}

	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
	return ret;
}
void
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
	enum rte_eth_event_type event, void *cb_arg)
{
	struct rte_eth_dev_callback *cb_lst;
	struct rte_eth_dev_callback dev_cb;

	rte_spinlock_lock(&rte_eth_dev_cb_lock);
	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
			continue;
		dev_cb = *cb_lst;
		cb_lst->active = 1;
		if (cb_arg != NULL)
			dev_cb.cb_arg = (void *) cb_arg;

		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
						dev_cb.cb_arg);
		rte_spinlock_lock(&rte_eth_dev_cb_lock);
		cb_lst->active = 0;
	}
	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
int
rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	uint16_t qid;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	intr_handle = &dev->pci_dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		vec = intr_handle->intr_vec[qid];
		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
		if (rc && rc != -EEXIST) {
			RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
					" op %d epfd %d vec %u\n",
					port_id, qid, op, epfd, vec);
			return rc;
		}
	}

	return 0;
}
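/*
 * Example (illustrative sketch, after the l3fwd-power pattern): register
 * all RX queues of a port with the calling thread's epoll instance, then
 * sleep until traffic arrives on a queue.
 *
 *	rte_eth_dev_rx_intr_ctl(port_id, RTE_EPOLL_PER_THREAD,
 *				RTE_INTR_EVENT_ADD, NULL);
 *	...
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */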
const struct rte_memzone *
rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
			 uint16_t queue_id, size_t size, unsigned align,
			 int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->driver->pci_drv.driver.name, ring_name,
		 dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	if (rte_xen_dom0_supported())
		return rte_memzone_reserve_bounded(z_name, size, socket_id,
						   0, align, RTE_PGSIZE_2M);
	else
		return rte_memzone_reserve_aligned(z_name, size, socket_id,
						   0, align);
}
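/*
 * Example (sketch of PMD-side usage): reserve DMA-able memory for an RX
 * descriptor ring during queue setup. "ring_size", "queue_idx" and
 * "socket_id" are hypothetical driver-local variables.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
 *				      ring_size, RTE_CACHE_LINE_SIZE,
 *				      socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */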
int
rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
			  int epfd, int op, void *data)
{
	uint32_t vec;
	struct rte_eth_dev *dev;
	struct rte_intr_handle *intr_handle;
	int rc;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
		return -EINVAL;
	}

	intr_handle = &dev->pci_dev->intr_handle;
	if (!intr_handle->intr_vec) {
		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
		return -EPERM;
	}

	vec = intr_handle->intr_vec[queue_id];
	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
	if (rc && rc != -EEXIST) {
		RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
				" op %d epfd %d vec %u\n",
				port_id, queue_id, op, epfd, vec);
		return rc;
	}

	return 0;
}
int
rte_eth_dev_rx_intr_enable(uint8_t port_id,
			   uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
	return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
}
int
rte_eth_dev_rx_intr_disable(uint8_t port_id,
			    uint16_t queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
	return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
}
#ifdef RTE_NIC_BYPASS
int rte_eth_dev_bypass_init(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
	(*dev->dev_ops->bypass_init)(dev);
	return 0;
}

int
rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
	(*dev->dev_ops->bypass_state_show)(dev, state);
	return 0;
}

int
rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
	(*dev->dev_ops->bypass_state_set)(dev, new_state);
	return 0;
}

int
rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
	(*dev->dev_ops->bypass_event_show)(dev, event, state);
	return 0;
}

int
rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
	(*dev->dev_ops->bypass_event_set)(dev, event, state);
	return 0;
}

int
rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
	(*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
	return 0;
}

int
rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
	(*dev->dev_ops->bypass_ver_show)(dev, ver);
	return 0;
}

int
rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
	(*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
	return 0;
}

int
rte_eth_dev_bypass_wd_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
	(*dev->dev_ops->bypass_wd_reset)(dev);
	return 0;
}
#endif /* RTE_NIC_BYPASS */
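/*
 * Example (illustrative sketch, bypass-capable NICs only): arm the bypass
 * watchdog and keep kicking it from the application's main loop, so the
 * NIC fails over if the application stalls. RTE_BYPASS_TMT_2_SEC is
 * assumed to be one of the timeout codes declared with the bypass API in
 * rte_ethdev.h; do_work() is a hypothetical application function.
 *
 *	uint32_t timeout = RTE_BYPASS_TMT_2_SEC;
 *
 *	rte_eth_dev_bypass_init(port_id);
 *	rte_eth_dev_wd_timeout_store(port_id, timeout);
 *	for (;;) {
 *		do_work();
 *		rte_eth_dev_bypass_wd_reset(port_id);
 *	}
 */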
int
rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
				RTE_ETH_FILTER_NOP, NULL);
}
int
rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
			enum rte_filter_op filter_op, void *arg)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
	return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
}
void *
rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}
	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}
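/*
 * Example (illustrative sketch, in the spirit of the rxtx_callbacks
 * sample application): count packets on queue 0. The callback executes
 * inside rte_eth_rx_burst() on the polling lcore; "pkt_count" is a
 * hypothetical application counter.
 *
 *	static uint64_t pkt_count;
 *
 *	static uint16_t
 *	count_cb(uint8_t port __rte_unused, uint16_t queue __rte_unused,
 *		 struct rte_mbuf **pkts __rte_unused, uint16_t nb_pkts,
 *		 uint16_t max_pkts __rte_unused, void *arg __rte_unused)
 *	{
 *		pkt_count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	void *cb = rte_eth_add_rx_callback(port_id, 0, count_cb, NULL);
 */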
void *
rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
		rte_rx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.rx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	/* Add the callbacks at first position */
	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
	rte_smp_wmb();
	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return cb;
}
void *
rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
		rte_tx_callback_fn fn, void *user_param)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	rte_errno = ENOTSUP;
	return NULL;
#endif
	/* check input parameters */
	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
		rte_errno = EINVAL;
		return NULL;
	}

	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);

	if (cb == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	cb->fn.tx = fn;
	cb->param = user_param;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	/* Add the callbacks in fifo order. */
	struct rte_eth_rxtx_callback *tail =
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];

	if (!tail) {
		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
	} else {
		while (tail->next)
			tail = tail->next;
		tail->next = cb;
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return cb;
}
int
rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&rte_eth_rx_cb_lock);
	prev_cb = &dev->post_rx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_rx_cb_lock);

	return ret;
}
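/*
 * NOTE: removal only unlinks the callback from the queue's list; its
 * memory is deliberately not freed here because a data-plane thread may
 * still be executing it. The caller owns the pointer and should free it
 * with rte_free() only after making sure no rte_eth_rx_burst() call can
 * still reference it.
 */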
int
rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
		struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
	return -ENOTSUP;
#endif
	/* Check input parameters. */
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	if (user_cb == NULL ||
			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
		return -EINVAL;

	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_eth_rxtx_callback *cb;
	struct rte_eth_rxtx_callback **prev_cb;
	int ret = -EINVAL;

	rte_spinlock_lock(&rte_eth_tx_cb_lock);
	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
	for (; *prev_cb != NULL; prev_cb = &cb->next) {
		cb = *prev_cb;
		if (cb == user_cb) {
			/* Remove the user cb from the callback list. */
			*prev_cb = cb->next;
			ret = 0;
			break;
		}
	}
	rte_spinlock_unlock(&rte_eth_tx_cb_lock);

	return ret;
}
int
rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
	return 0;
}
int
rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (qinfo == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	if (queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);

	memset(qinfo, 0, sizeof(*qinfo));
	dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
	return 0;
}
int
rte_eth_dev_set_mc_addr_list(uint8_t port_id,
			     struct ether_addr *mc_addr_set,
			     uint32_t nb_mc_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
	return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
}
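/*
 * Example (illustrative sketch): subscribe a port to two IPv4 multicast
 * MAC addresses; each call replaces the previously installed list.
 *
 *	struct ether_addr mc[2] = {
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
 *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
 *	};
 *
 *	if (rte_eth_dev_set_mc_addr_list(port_id, mc, 2) != 0)
 *		printf("multicast list rejected\n");
 */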
int
rte_eth_timesync_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return (*dev->dev_ops->timesync_enable)(dev);
}
int
rte_eth_timesync_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return (*dev->dev_ops->timesync_disable)(dev);
}
int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}
int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}
int
rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}
int
rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}
int
rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}
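/*
 * Example (illustrative sketch): slew the port's PTP hardware clock by a
 * measured offset and read the adjusted time back. "offset_ns" is a
 * hypothetical value computed by a PTP servo loop.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(port_id);
 *	rte_eth_timesync_adjust_time(port_id, offset_ns);
 *	rte_eth_timesync_read_time(port_id, &ts);
 */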
int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return (*dev->dev_ops->get_reg)(dev, info);
}
int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom_length)(dev);
}
int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom)(dev, info);
}
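/*
 * Example (illustrative sketch): dump a port's whole EEPROM. The caller
 * provides the buffer plus the offset/length window to read;
 * use_eeprom() is a hypothetical application function.
 *
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	if (len > 0) {
 *		struct rte_dev_eeprom_info info = {
 *			.data = malloc(len),
 *			.offset = 0,
 *			.length = (uint32_t)len,
 *		};
 *
 *		if (info.data != NULL &&
 *		    rte_eth_dev_get_eeprom(port_id, &info) == 0)
 *			use_eeprom(info.data, info.length);
 *	}
 */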
int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return (*dev->dev_ops->set_eeprom)(dev, info);
}
int
rte_eth_dev_get_dcb_info(uint8_t port_id,
			 struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}
void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
{
	if ((eth_dev == NULL) || (pci_dev == NULL)) {
		RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
				eth_dev, pci_dev);
		return;
	}

	eth_dev->data->dev_flags = 0;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	eth_dev->data->kdrv = pci_dev->kdrv;
	eth_dev->data->numa_node = pci_dev->device.numa_node;
	eth_dev->data->drv_name = pci_dev->driver->driver.name;
}
int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}
int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}