ceph/src/spdk/dpdk/lib/librte_ethdev/rte_ethdev.c
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2017 Intel Corporation
3 */
4
5#include <sys/types.h>
6#include <sys/queue.h>
7#include <ctype.h>
8#include <stdio.h>
9#include <stdlib.h>
10#include <string.h>
11#include <stdarg.h>
12#include <errno.h>
13#include <stdbool.h>
14#include <stdint.h>
15#include <inttypes.h>
16#include <netinet/in.h>
17
18#include <rte_byteorder.h>
19#include <rte_log.h>
20#include <rte_debug.h>
21#include <rte_interrupts.h>
22#include <rte_memory.h>
23#include <rte_memcpy.h>
24#include <rte_memzone.h>
25#include <rte_launch.h>
26#include <rte_eal.h>
27#include <rte_per_lcore.h>
28#include <rte_lcore.h>
29#include <rte_atomic.h>
30#include <rte_branch_prediction.h>
31#include <rte_common.h>
32#include <rte_mempool.h>
33#include <rte_malloc.h>
34#include <rte_mbuf.h>
35#include <rte_errno.h>
36#include <rte_spinlock.h>
37#include <rte_string_fns.h>
38#include <rte_kvargs.h>
 39#include <rte_class.h>
40#include <rte_ether.h>
41#include <rte_telemetry.h>
 42
 43#include "rte_ethdev_trace.h"
44#include "rte_ethdev.h"
45#include "rte_ethdev_driver.h"
46#include "ethdev_profile.h"
 47#include "ethdev_private.h"
48
49int rte_eth_dev_logtype;
50
51static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
52struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
53
54/* spinlock for eth device callbacks */
55static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
56
57/* spinlock for add/remove rx callbacks */
58static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
59
60/* spinlock for add/remove tx callbacks */
61static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
62
63/* spinlock for shared data allocation */
64static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
65
66/* store statistics names and its offset in stats structure */
67struct rte_eth_xstats_name_off {
68 char name[RTE_ETH_XSTATS_NAME_SIZE];
69 unsigned offset;
70};
71
72/* Shared memory between primary and secondary processes. */
73static struct {
74 uint64_t next_owner_id;
75 rte_spinlock_t ownership_lock;
76 struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
77} *rte_eth_dev_shared_data;
78
79static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
80 {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
81 {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
82 {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
83 {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
84 {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
85 {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
86 {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
87 {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
88 rx_nombuf)},
89};
90
 91#define RTE_NB_STATS RTE_DIM(rte_stats_strings)
92
93static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
94 {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
95 {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
96 {"errors", offsetof(struct rte_eth_stats, q_errors)},
97};
98
 99#define RTE_NB_RXQ_STATS RTE_DIM(rte_rxq_stats_strings)
100
101static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
102 {"packets", offsetof(struct rte_eth_stats, q_opackets)},
103 {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
104};
 105#define RTE_NB_TXQ_STATS RTE_DIM(rte_txq_stats_strings)
106
107#define RTE_RX_OFFLOAD_BIT2STR(_name) \
108 { DEV_RX_OFFLOAD_##_name, #_name }
109
110static const struct {
111 uint64_t offload;
112 const char *name;
113} rte_rx_offload_names[] = {
114 RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
115 RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
116 RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
117 RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
118 RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
119 RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
120 RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
121 RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
122 RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
123 RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
124 RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
125 RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
126 RTE_RX_OFFLOAD_BIT2STR(SCATTER),
127 RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
128 RTE_RX_OFFLOAD_BIT2STR(SECURITY),
129 RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
130 RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
131 RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
 132 RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
133};
134
135#undef RTE_RX_OFFLOAD_BIT2STR
136
137#define RTE_TX_OFFLOAD_BIT2STR(_name) \
138 { DEV_TX_OFFLOAD_##_name, #_name }
139
140static const struct {
141 uint64_t offload;
142 const char *name;
143} rte_tx_offload_names[] = {
144 RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
145 RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
146 RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
147 RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
148 RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
149 RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
150 RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
151 RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
152 RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
153 RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
154 RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
155 RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
156 RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
157 RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
158 RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
159 RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
160 RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
161 RTE_TX_OFFLOAD_BIT2STR(SECURITY),
162 RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
163 RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
164 RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
165};
166
167#undef RTE_TX_OFFLOAD_BIT2STR
168
169/**
170 * The user application callback description.
171 *
172 * It contains callback address to be registered by user application,
173 * the pointer to the parameters for callback, and the event type.
174 */
175struct rte_eth_dev_callback {
176 TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
177 rte_eth_dev_cb_fn cb_fn; /**< Callback address */
178 void *cb_arg; /**< Parameter for callback */
179 void *ret_param; /**< Return parameter */
180 enum rte_eth_event_type event; /**< Interrupt event type */
181 uint32_t active; /**< Callback is executing */
182};
183
184enum {
185 STAT_QMAP_TX = 0,
186 STAT_QMAP_RX
187};
188
189int
190rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
191{
192 int ret;
193 struct rte_devargs devargs = {.args = NULL};
194 const char *bus_param_key;
195 char *bus_str = NULL;
196 char *cls_str = NULL;
197 int str_size;
198
199 memset(iter, 0, sizeof(*iter));
200
201 /*
202 * The devargs string may use various syntaxes:
203 * - 0000:08:00.0,representor=[1-3]
204 * - pci:0000:06:00.0,representor=[0,5]
205 * - class=eth,mac=00:11:22:33:44:55
206 * A new syntax is in development (not yet supported):
207 * - bus=X,paramX=x/class=Y,paramY=y/driver=Z,paramZ=z
208 */
209
210 /*
211 * Handle pure class filter (i.e. without any bus-level argument),
212 * from future new syntax.
213 * rte_devargs_parse() is not yet supporting the new syntax,
214 * that's why this simple case is temporarily parsed here.
215 */
216#define iter_anybus_str "class=eth,"
217 if (strncmp(devargs_str, iter_anybus_str,
218 strlen(iter_anybus_str)) == 0) {
219 iter->cls_str = devargs_str + strlen(iter_anybus_str);
220 goto end;
221 }
222
223 /* Split bus, device and parameters. */
224 ret = rte_devargs_parse(&devargs, devargs_str);
225 if (ret != 0)
226 goto error;
227
228 /*
229 * Assume parameters of old syntax can match only at ethdev level.
230 * Extra parameters will be ignored, thanks to "+" prefix.
231 */
232 str_size = strlen(devargs.args) + 2;
233 cls_str = malloc(str_size);
234 if (cls_str == NULL) {
235 ret = -ENOMEM;
236 goto error;
237 }
238 ret = snprintf(cls_str, str_size, "+%s", devargs.args);
239 if (ret != str_size - 1) {
240 ret = -EINVAL;
241 goto error;
242 }
243 iter->cls_str = cls_str;
244 free(devargs.args); /* allocated by rte_devargs_parse() */
245 devargs.args = NULL;
246
247 iter->bus = devargs.bus;
248 if (iter->bus->dev_iterate == NULL) {
249 ret = -ENOTSUP;
250 goto error;
251 }
252
253 /* Convert bus args to new syntax for use with new API dev_iterate. */
254 if (strcmp(iter->bus->name, "vdev") == 0) {
255 bus_param_key = "name";
256 } else if (strcmp(iter->bus->name, "pci") == 0) {
257 bus_param_key = "addr";
258 } else {
259 ret = -ENOTSUP;
260 goto error;
261 }
262 str_size = strlen(bus_param_key) + strlen(devargs.name) + 2;
263 bus_str = malloc(str_size);
264 if (bus_str == NULL) {
265 ret = -ENOMEM;
266 goto error;
267 }
268 ret = snprintf(bus_str, str_size, "%s=%s",
269 bus_param_key, devargs.name);
270 if (ret != str_size - 1) {
271 ret = -EINVAL;
272 goto error;
273 }
274 iter->bus_str = bus_str;
275
276end:
277 iter->cls = rte_class_find_by_name("eth");
278 return 0;
279
280error:
281 if (ret == -ENOTSUP)
282 RTE_LOG(ERR, EAL, "Bus %s does not support iterating.\n",
283 iter->bus->name);
284 free(devargs.args);
285 free(bus_str);
286 free(cls_str);
287 return ret;
288}
289
290uint16_t
291rte_eth_iterator_next(struct rte_dev_iterator *iter)
292{
293 if (iter->cls == NULL) /* invalid ethdev iterator */
294 return RTE_MAX_ETHPORTS;
295
296 do { /* loop to try all matching rte_device */
297 /* If not pure ethdev filter and */
298 if (iter->bus != NULL &&
299 /* not in middle of rte_eth_dev iteration, */
300 iter->class_device == NULL) {
301 /* get next rte_device to try. */
302 iter->device = iter->bus->dev_iterate(
303 iter->device, iter->bus_str, iter);
304 if (iter->device == NULL)
305 break; /* no more rte_device candidate */
306 }
307 /* A device is matching bus part, need to check ethdev part. */
308 iter->class_device = iter->cls->dev_iterate(
309 iter->class_device, iter->cls_str, iter);
310 if (iter->class_device != NULL)
311 return eth_dev_to_id(iter->class_device); /* match */
312 } while (iter->bus != NULL); /* need to try next rte_device */
313
314 /* No more ethdev port to iterate. */
315 rte_eth_iterator_cleanup(iter);
316 return RTE_MAX_ETHPORTS;
317}
318
319void
320rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
321{
322 if (iter->bus_str == NULL)
323 return; /* nothing to free in pure class filter */
324 free(RTE_CAST_FIELD(iter, bus_str, char *)); /* workaround const */
325 free(RTE_CAST_FIELD(iter, cls_str, char *)); /* workaround const */
326 memset(iter, 0, sizeof(*iter));
327}
328
329uint16_t
330rte_eth_find_next(uint16_t port_id)
331{
332 while (port_id < RTE_MAX_ETHPORTS &&
 333 rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
334 port_id++;
335
336 if (port_id >= RTE_MAX_ETHPORTS)
337 return RTE_MAX_ETHPORTS;
338
339 return port_id;
340}
341
342/*
343 * Macro to iterate over all valid ports for internal usage.
 344 * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
345 */
346#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
347 for (port_id = rte_eth_find_next(0); \
348 port_id < RTE_MAX_ETHPORTS; \
349 port_id = rte_eth_find_next(port_id + 1))
350
351uint16_t
352rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
353{
354 port_id = rte_eth_find_next(port_id);
355 while (port_id < RTE_MAX_ETHPORTS &&
356 rte_eth_devices[port_id].device != parent)
357 port_id = rte_eth_find_next(port_id + 1);
358
359 return port_id;
360}
361
362uint16_t
363rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
364{
365 RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
366 return rte_eth_find_next_of(port_id,
367 rte_eth_devices[ref_port_id].device);
368}
369
370static void
371rte_eth_dev_shared_data_prepare(void)
372{
373 const unsigned flags = 0;
374 const struct rte_memzone *mz;
375
376 rte_spinlock_lock(&rte_eth_shared_data_lock);
377
378 if (rte_eth_dev_shared_data == NULL) {
379 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
380 /* Allocate port data and ownership shared memory. */
381 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
382 sizeof(*rte_eth_dev_shared_data),
383 rte_socket_id(), flags);
384 } else
385 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
386 if (mz == NULL)
387 rte_panic("Cannot allocate ethdev shared data\n");
388
389 rte_eth_dev_shared_data = mz->addr;
390 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
391 rte_eth_dev_shared_data->next_owner_id =
392 RTE_ETH_DEV_NO_OWNER + 1;
393 rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
394 memset(rte_eth_dev_shared_data->data, 0,
395 sizeof(rte_eth_dev_shared_data->data));
396 }
397 }
398
399 rte_spinlock_unlock(&rte_eth_shared_data_lock);
400}
401
402static bool
403is_allocated(const struct rte_eth_dev *ethdev)
404{
405 return ethdev->data->name[0] != '\0';
406}
407
408static struct rte_eth_dev *
409_rte_eth_dev_allocated(const char *name)
410{
411 unsigned i;
412
413 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
414 if (rte_eth_devices[i].data != NULL &&
415 strcmp(rte_eth_devices[i].data->name, name) == 0)
416 return &rte_eth_devices[i];
417 }
418 return NULL;
419}
420
421struct rte_eth_dev *
422rte_eth_dev_allocated(const char *name)
423{
424 struct rte_eth_dev *ethdev;
425
426 rte_eth_dev_shared_data_prepare();
427
428 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
429
430 ethdev = _rte_eth_dev_allocated(name);
431
432 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
433
434 return ethdev;
435}
436
437static uint16_t
438rte_eth_dev_find_free_port(void)
439{
440 unsigned i;
441
442 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
443 /* Using shared name field to find a free port. */
444 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
445 RTE_ASSERT(rte_eth_devices[i].state ==
446 RTE_ETH_DEV_UNUSED);
447 return i;
448 }
449 }
450 return RTE_MAX_ETHPORTS;
451}
452
453static struct rte_eth_dev *
454eth_dev_get(uint16_t port_id)
455{
456 struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
457
458 eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
459
460 return eth_dev;
461}
462
463struct rte_eth_dev *
464rte_eth_dev_allocate(const char *name)
465{
466 uint16_t port_id;
467 struct rte_eth_dev *eth_dev = NULL;
468 size_t name_len;
469
470 name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
471 if (name_len == 0) {
472 RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
473 return NULL;
474 }
475
476 if (name_len >= RTE_ETH_NAME_MAX_LEN) {
477 RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
478 return NULL;
479 }
480
481 rte_eth_dev_shared_data_prepare();
482
483 /* Synchronize port creation between primary and secondary threads. */
484 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
485
486 if (_rte_eth_dev_allocated(name) != NULL) {
487 RTE_ETHDEV_LOG(ERR,
488 "Ethernet device with name %s already allocated\n",
489 name);
490 goto unlock;
491 }
492
493 port_id = rte_eth_dev_find_free_port();
494 if (port_id == RTE_MAX_ETHPORTS) {
495 RTE_ETHDEV_LOG(ERR,
496 "Reached maximum number of Ethernet ports\n");
497 goto unlock;
498 }
499
500 eth_dev = eth_dev_get(port_id);
 501 strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
 502 eth_dev->data->port_id = port_id;
 503 eth_dev->data->mtu = RTE_ETHER_MTU;
504
505unlock:
506 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
507
508 return eth_dev;
509}
510
511/*
512 * Attach to a port already registered by the primary process, which
513 * makes sure that the same device would have the same port id both
514 * in the primary and secondary process.
515 */
516struct rte_eth_dev *
517rte_eth_dev_attach_secondary(const char *name)
518{
519 uint16_t i;
520 struct rte_eth_dev *eth_dev = NULL;
521
522 rte_eth_dev_shared_data_prepare();
523
524 /* Synchronize port attachment to primary port creation and release. */
525 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
526
527 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
528 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
529 break;
530 }
531 if (i == RTE_MAX_ETHPORTS) {
532 RTE_ETHDEV_LOG(ERR,
533 "Device %s is not driven by the primary process\n",
534 name);
535 } else {
536 eth_dev = eth_dev_get(i);
537 RTE_ASSERT(eth_dev->data->port_id == i);
538 }
539
540 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
541 return eth_dev;
542}
543
544int
545rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
546{
547 if (eth_dev == NULL)
548 return -EINVAL;
549
550 rte_eth_dev_shared_data_prepare();
551
552 if (eth_dev->state != RTE_ETH_DEV_UNUSED)
553 _rte_eth_dev_callback_process(eth_dev,
554 RTE_ETH_EVENT_DESTROY, NULL);
555
556 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
557
558 eth_dev->state = RTE_ETH_DEV_UNUSED;
559
560 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
561 rte_free(eth_dev->data->rx_queues);
562 rte_free(eth_dev->data->tx_queues);
563 rte_free(eth_dev->data->mac_addrs);
564 rte_free(eth_dev->data->hash_mac_addrs);
565 rte_free(eth_dev->data->dev_private);
566 memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
567 }
568
569 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
570
571 return 0;
572}
573
574int
575rte_eth_dev_is_valid_port(uint16_t port_id)
576{
577 if (port_id >= RTE_MAX_ETHPORTS ||
578 (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
579 return 0;
580 else
581 return 1;
582}
583
584static int
585rte_eth_is_valid_owner_id(uint64_t owner_id)
586{
587 if (owner_id == RTE_ETH_DEV_NO_OWNER ||
 588 rte_eth_dev_shared_data->next_owner_id <= owner_id)
 589 return 0;
590 return 1;
591}
592
593uint64_t
594rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
595{
 596 port_id = rte_eth_find_next(port_id);
 597 while (port_id < RTE_MAX_ETHPORTS &&
598 rte_eth_devices[port_id].data->owner.id != owner_id)
599 port_id = rte_eth_find_next(port_id + 1);
600
601 return port_id;
602}
603
 604int
605rte_eth_dev_owner_new(uint64_t *owner_id)
606{
607 rte_eth_dev_shared_data_prepare();
608
609 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
610
611 *owner_id = rte_eth_dev_shared_data->next_owner_id++;
612
613 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
614 return 0;
615}
616
617static int
618_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
619 const struct rte_eth_dev_owner *new_owner)
620{
621 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
622 struct rte_eth_dev_owner *port_owner;
623
624 if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
625 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
626 port_id);
627 return -ENODEV;
628 }
629
630 if (!rte_eth_is_valid_owner_id(new_owner->id) &&
631 !rte_eth_is_valid_owner_id(old_owner_id)) {
632 RTE_ETHDEV_LOG(ERR,
633 "Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
634 old_owner_id, new_owner->id);
 635 return -EINVAL;
 636 }
637
638 port_owner = &rte_eth_devices[port_id].data->owner;
639 if (port_owner->id != old_owner_id) {
640 RTE_ETHDEV_LOG(ERR,
641 "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
642 port_id, port_owner->name, port_owner->id);
643 return -EPERM;
644 }
645
646 /* can not truncate (same structure) */
647 strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
648
649 port_owner->id = new_owner->id;
650
651 RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
652 port_id, new_owner->name, new_owner->id);
653
654 return 0;
655}
656
 657int
658rte_eth_dev_owner_set(const uint16_t port_id,
659 const struct rte_eth_dev_owner *owner)
660{
661 int ret;
662
663 rte_eth_dev_shared_data_prepare();
664
665 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
666
667 ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
668
669 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
670 return ret;
671}
672
 673int
674rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
675{
676 const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
677 {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
678 int ret;
679
680 rte_eth_dev_shared_data_prepare();
681
682 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
683
684 ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
685
686 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
687 return ret;
688}
689
 690int
691rte_eth_dev_owner_delete(const uint64_t owner_id)
692{
693 uint16_t port_id;
 694 int ret = 0;
695
696 rte_eth_dev_shared_data_prepare();
697
698 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
699
700 if (rte_eth_is_valid_owner_id(owner_id)) {
701 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
702 if (rte_eth_devices[port_id].data->owner.id == owner_id)
703 memset(&rte_eth_devices[port_id].data->owner, 0,
704 sizeof(struct rte_eth_dev_owner));
 705 RTE_ETHDEV_LOG(NOTICE,
706 "All port owners owned by %016"PRIx64" identifier have removed\n",
707 owner_id);
708 } else {
709 RTE_ETHDEV_LOG(ERR,
710 "Invalid owner id=%016"PRIx64"\n",
711 owner_id);
 712 ret = -EINVAL;
713 }
714
715 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
716
717 return ret;
718}
719
 720int
721rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
722{
723 int ret = 0;
724 struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
725
726 rte_eth_dev_shared_data_prepare();
727
728 rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
729
730 if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
731 RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
732 port_id);
733 ret = -ENODEV;
734 } else {
735 rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
736 }
737
738 rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
739 return ret;
740}
741
742int
743rte_eth_dev_socket_id(uint16_t port_id)
744{
745 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
746 return rte_eth_devices[port_id].data->numa_node;
747}
748
749void *
750rte_eth_dev_get_sec_ctx(uint16_t port_id)
751{
752 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
753 return rte_eth_devices[port_id].security_ctx;
754}
755
756uint16_t
757rte_eth_dev_count_avail(void)
758{
759 uint16_t p;
760 uint16_t count;
761
762 count = 0;
763
764 RTE_ETH_FOREACH_DEV(p)
765 count++;
766
767 return count;
768}
769
 770uint16_t
771rte_eth_dev_count_total(void)
772{
773 uint16_t port, count = 0;
774
775 RTE_ETH_FOREACH_VALID_DEV(port)
776 count++;
777
778 return count;
779}
780
781int
782rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
783{
784 char *tmp;
785
786 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
787
788 if (name == NULL) {
789 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
790 return -EINVAL;
791 }
792
793 /* shouldn't check 'rte_eth_devices[i].data',
794 * because it might be overwritten by VDEV PMD */
795 tmp = rte_eth_dev_shared_data->data[port_id].name;
796 strcpy(name, tmp);
797 return 0;
798}
799
800int
801rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
802{
803 uint32_t pid;
804
805 if (name == NULL) {
806 RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
807 return -EINVAL;
808 }
809
810 RTE_ETH_FOREACH_VALID_DEV(pid)
811 if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
812 *port_id = pid;
813 return 0;
814 }
815
816 return -ENODEV;
817}
818
819static int
820eth_err(uint16_t port_id, int ret)
821{
822 if (ret == 0)
823 return 0;
824 if (rte_eth_dev_is_removed(port_id))
825 return -EIO;
826 return ret;
827}
828
829static int
830rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
831{
832 uint16_t old_nb_queues = dev->data->nb_rx_queues;
833 void **rxq;
834 unsigned i;
835
836 if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
837 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
838 sizeof(dev->data->rx_queues[0]) * nb_queues,
839 RTE_CACHE_LINE_SIZE);
840 if (dev->data->rx_queues == NULL) {
841 dev->data->nb_rx_queues = 0;
842 return -(ENOMEM);
843 }
844 } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
845 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
846
847 rxq = dev->data->rx_queues;
848
849 for (i = nb_queues; i < old_nb_queues; i++)
850 (*dev->dev_ops->rx_queue_release)(rxq[i]);
851 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
852 RTE_CACHE_LINE_SIZE);
853 if (rxq == NULL)
854 return -(ENOMEM);
855 if (nb_queues > old_nb_queues) {
856 uint16_t new_qs = nb_queues - old_nb_queues;
857
858 memset(rxq + old_nb_queues, 0,
859 sizeof(rxq[0]) * new_qs);
860 }
861
862 dev->data->rx_queues = rxq;
863
864 } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
865 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
866
867 rxq = dev->data->rx_queues;
868
869 for (i = nb_queues; i < old_nb_queues; i++)
870 (*dev->dev_ops->rx_queue_release)(rxq[i]);
871
872 rte_free(dev->data->rx_queues);
873 dev->data->rx_queues = NULL;
874 }
875 dev->data->nb_rx_queues = nb_queues;
876 return 0;
877}
878
879int
880rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
881{
882 struct rte_eth_dev *dev;
883
884 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
885
886 dev = &rte_eth_devices[port_id];
887 if (!dev->data->dev_started) {
888 RTE_ETHDEV_LOG(ERR,
889 "Port %u must be started before start any queue\n",
890 port_id);
891 return -EINVAL;
892 }
893
894 if (rx_queue_id >= dev->data->nb_rx_queues) {
895 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
896 return -EINVAL;
897 }
898
899 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
900
901 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
902 RTE_ETHDEV_LOG(INFO,
903 "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
904 rx_queue_id, port_id);
905 return -EINVAL;
906 }
907
908 if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
909 RTE_ETHDEV_LOG(INFO,
910 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
911 rx_queue_id, port_id);
912 return 0;
913 }
914
915 return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
916 rx_queue_id));
917
918}
919
920int
921rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
922{
923 struct rte_eth_dev *dev;
924
925 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
926
927 dev = &rte_eth_devices[port_id];
928 if (rx_queue_id >= dev->data->nb_rx_queues) {
929 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
930 return -EINVAL;
931 }
932
933 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
934
935 if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
936 RTE_ETHDEV_LOG(INFO,
937 "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
938 rx_queue_id, port_id);
939 return -EINVAL;
940 }
941
942 if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
943 RTE_ETHDEV_LOG(INFO,
944 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
945 rx_queue_id, port_id);
946 return 0;
947 }
948
949 return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
950
951}
952
953int
954rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
955{
956 struct rte_eth_dev *dev;
957
958 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
959
960 dev = &rte_eth_devices[port_id];
961 if (!dev->data->dev_started) {
962 RTE_ETHDEV_LOG(ERR,
963 "Port %u must be started before start any queue\n",
964 port_id);
965 return -EINVAL;
966 }
967
968 if (tx_queue_id >= dev->data->nb_tx_queues) {
969 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
970 return -EINVAL;
971 }
972
973 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
974
975 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
976 RTE_ETHDEV_LOG(INFO,
977 "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
978 tx_queue_id, port_id);
979 return -EINVAL;
980 }
981
982 if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
983 RTE_ETHDEV_LOG(INFO,
984 "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
985 tx_queue_id, port_id);
986 return 0;
987 }
988
989 return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
990}
991
992int
993rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
994{
995 struct rte_eth_dev *dev;
996
997 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
998
999 dev = &rte_eth_devices[port_id];
1000 if (tx_queue_id >= dev->data->nb_tx_queues) {
1001 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
1002 return -EINVAL;
1003 }
1004
1005 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
1006
1007 if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
1008 RTE_ETHDEV_LOG(INFO,
1009 "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
1010 tx_queue_id, port_id);
1011 return -EINVAL;
1012 }
1013
1014 if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
1015 RTE_ETHDEV_LOG(INFO,
1016 "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
1017 tx_queue_id, port_id);
1018 return 0;
1019 }
1020
1021 return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
1022
1023}
1024
1025static int
1026rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
1027{
1028 uint16_t old_nb_queues = dev->data->nb_tx_queues;
1029 void **txq;
1030 unsigned i;
1031
1032 if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
1033 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
1034 sizeof(dev->data->tx_queues[0]) * nb_queues,
1035 RTE_CACHE_LINE_SIZE);
1036 if (dev->data->tx_queues == NULL) {
1037 dev->data->nb_tx_queues = 0;
1038 return -(ENOMEM);
1039 }
1040 } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
1041 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1042
1043 txq = dev->data->tx_queues;
1044
1045 for (i = nb_queues; i < old_nb_queues; i++)
1046 (*dev->dev_ops->tx_queue_release)(txq[i]);
1047 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
1048 RTE_CACHE_LINE_SIZE);
1049 if (txq == NULL)
1050 return -ENOMEM;
1051 if (nb_queues > old_nb_queues) {
1052 uint16_t new_qs = nb_queues - old_nb_queues;
1053
1054 memset(txq + old_nb_queues, 0,
1055 sizeof(txq[0]) * new_qs);
1056 }
1057
1058 dev->data->tx_queues = txq;
1059
1060 } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
1061 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
1062
1063 txq = dev->data->tx_queues;
1064
1065 for (i = nb_queues; i < old_nb_queues; i++)
1066 (*dev->dev_ops->tx_queue_release)(txq[i]);
1067
1068 rte_free(dev->data->tx_queues);
1069 dev->data->tx_queues = NULL;
1070 }
1071 dev->data->nb_tx_queues = nb_queues;
1072 return 0;
1073}
1074
1075uint32_t
1076rte_eth_speed_bitflag(uint32_t speed, int duplex)
1077{
1078 switch (speed) {
1079 case ETH_SPEED_NUM_10M:
1080 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
1081 case ETH_SPEED_NUM_100M:
1082 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
1083 case ETH_SPEED_NUM_1G:
1084 return ETH_LINK_SPEED_1G;
1085 case ETH_SPEED_NUM_2_5G:
1086 return ETH_LINK_SPEED_2_5G;
1087 case ETH_SPEED_NUM_5G:
1088 return ETH_LINK_SPEED_5G;
1089 case ETH_SPEED_NUM_10G:
1090 return ETH_LINK_SPEED_10G;
1091 case ETH_SPEED_NUM_20G:
1092 return ETH_LINK_SPEED_20G;
1093 case ETH_SPEED_NUM_25G:
1094 return ETH_LINK_SPEED_25G;
1095 case ETH_SPEED_NUM_40G:
1096 return ETH_LINK_SPEED_40G;
1097 case ETH_SPEED_NUM_50G:
1098 return ETH_LINK_SPEED_50G;
1099 case ETH_SPEED_NUM_56G:
1100 return ETH_LINK_SPEED_56G;
1101 case ETH_SPEED_NUM_100G:
1102 return ETH_LINK_SPEED_100G;
1103 case ETH_SPEED_NUM_200G:
1104 return ETH_LINK_SPEED_200G;
1105 default:
1106 return 0;
1107 }
1108}
1109
 1110const char *
1111rte_eth_dev_rx_offload_name(uint64_t offload)
1112{
1113 const char *name = "UNKNOWN";
1114 unsigned int i;
1115
1116 for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
1117 if (offload == rte_rx_offload_names[i].offload) {
1118 name = rte_rx_offload_names[i].name;
1119 break;
1120 }
1121 }
1122
1123 return name;
1124}
1125
 1126const char *
1127rte_eth_dev_tx_offload_name(uint64_t offload)
1128{
1129 const char *name = "UNKNOWN";
1130 unsigned int i;
1131
1132 for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1133 if (offload == rte_tx_offload_names[i].offload) {
1134 name = rte_tx_offload_names[i].name;
1135 break;
1136 }
1137 }
1138
1139 return name;
1140}
1141
1142static inline int
1143check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
1144 uint32_t max_rx_pkt_len, uint32_t dev_info_size)
1145{
1146 int ret = 0;
1147
1148 if (dev_info_size == 0) {
1149 if (config_size != max_rx_pkt_len) {
1150 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
1151 " %u != %u is not allowed\n",
1152 port_id, config_size, max_rx_pkt_len);
1153 ret = -EINVAL;
1154 }
1155 } else if (config_size > dev_info_size) {
1156 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1157 "> max allowed value %u\n", port_id, config_size,
1158 dev_info_size);
1159 ret = -EINVAL;
1160 } else if (config_size < RTE_ETHER_MIN_LEN) {
1161 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
1162 "< min allowed value %u\n", port_id, config_size,
1163 (unsigned int)RTE_ETHER_MIN_LEN);
1164 ret = -EINVAL;
1165 }
1166 return ret;
1167}
1168
1169/*
1170 * Validate offloads that are requested through rte_eth_dev_configure against
1171 * the offloads successfully set by the ethernet device.
1172 *
1173 * @param port_id
1174 * The port identifier of the Ethernet device.
1175 * @param req_offloads
1176 * The offloads that have been requested through `rte_eth_dev_configure`.
1177 * @param set_offloads
1178 * The offloads successfully set by the ethernet device.
1179 * @param offload_type
1180 * The offload type i.e. Rx/Tx string.
1181 * @param offload_name
1182 * The function that prints the offload name.
1183 * @return
1184 * - (0) if validation successful.
1185 * - (-EINVAL) if requested offload has been silently disabled.
1186 *
1187 */
1188static int
1189validate_offloads(uint16_t port_id, uint64_t req_offloads,
1190 uint64_t set_offloads, const char *offload_type,
1191 const char *(*offload_name)(uint64_t))
1192{
1193 uint64_t offloads_diff = req_offloads ^ set_offloads;
1194 uint64_t offload;
1195 int ret = 0;
1196
1197 while (offloads_diff != 0) {
1198 /* Check if any offload is requested but not enabled. */
1199 offload = 1ULL << __builtin_ctzll(offloads_diff);
1200 if (offload & req_offloads) {
1201 RTE_ETHDEV_LOG(ERR,
1202 "Port %u failed to enable %s offload %s\n",
1203 port_id, offload_type, offload_name(offload));
1204 ret = -EINVAL;
1205 }
1206
1207 /* Check if offload couldn't be disabled. */
1208 if (offload & set_offloads) {
1209 RTE_ETHDEV_LOG(DEBUG,
1210 "Port %u %s offload %s is not requested but enabled\n",
1211 port_id, offload_type, offload_name(offload));
1212 }
1213
1214 offloads_diff &= ~offload;
1215 }
1216
1217 return ret;
1218}
1219
1220int
1221rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1222 const struct rte_eth_conf *dev_conf)
1223{
1224 struct rte_eth_dev *dev;
1225 struct rte_eth_dev_info dev_info;
 1226 struct rte_eth_conf orig_conf;
 1227 int diag;
 1228 int ret;
1229
1230 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1231
1232 dev = &rte_eth_devices[port_id];
1233
1234 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1235
1236 if (dev->data->dev_started) {
1237 RTE_ETHDEV_LOG(ERR,
1238 "Port %u must be stopped to allow configuration\n",
1239 port_id);
1240 return -EBUSY;
1241 }
1242
1243 /* Store original config, as rollback required on failure */
1244 memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
1245
1246 /*
1247 * Copy the dev_conf parameter into the dev structure.
1248 * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
1249 */
1250 if (dev_conf != &dev->data->dev_conf)
1251 memcpy(&dev->data->dev_conf, dev_conf,
1252 sizeof(dev->data->dev_conf));
 1253
1254 ret = rte_eth_dev_info_get(port_id, &dev_info);
1255 if (ret != 0)
1256 goto rollback;
1257
1258 /* If number of queues specified by application for both Rx and Tx is
1259 * zero, use driver preferred values. This cannot be done individually
1260 * as it is valid for either Tx or Rx (but not both) to be zero.
 1261 * If driver does not provide any preferred values, fall back on
1262 * EAL defaults.
1263 */
1264 if (nb_rx_q == 0 && nb_tx_q == 0) {
1265 nb_rx_q = dev_info.default_rxportconf.nb_queues;
1266 if (nb_rx_q == 0)
1267 nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
1268 nb_tx_q = dev_info.default_txportconf.nb_queues;
1269 if (nb_tx_q == 0)
1270 nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
1271 }
1272
1273 if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1274 RTE_ETHDEV_LOG(ERR,
1275 "Number of RX queues requested (%u) is greater than max supported(%d)\n",
1276 nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1277 ret = -EINVAL;
1278 goto rollback;
1279 }
1280
1281 if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1282 RTE_ETHDEV_LOG(ERR,
1283 "Number of TX queues requested (%u) is greater than max supported(%d)\n",
1284 nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1285 ret = -EINVAL;
1286 goto rollback;
1287 }
1288
1289 /*
1290 * Check that the numbers of RX and TX queues are not greater
1291 * than the maximum number of RX and TX queues supported by the
1292 * configured device.
1293 */
1294 if (nb_rx_q > dev_info.max_rx_queues) {
1295 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
1296 port_id, nb_rx_q, dev_info.max_rx_queues);
1297 ret = -EINVAL;
1298 goto rollback;
1299 }
1300
1301 if (nb_tx_q > dev_info.max_tx_queues) {
1302 RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
1303 port_id, nb_tx_q, dev_info.max_tx_queues);
1304 ret = -EINVAL;
1305 goto rollback;
1306 }
1307
1308 /* Check that the device supports requested interrupts */
1309 if ((dev_conf->intr_conf.lsc == 1) &&
1310 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1311 RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
1312 dev->device->driver->name);
1313 ret = -EINVAL;
1314 goto rollback;
1315 }
1316 if ((dev_conf->intr_conf.rmv == 1) &&
1317 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1318 RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
1319 dev->device->driver->name);
1320 ret = -EINVAL;
1321 goto rollback;
1322 }
1323
1324 /*
1325 * If jumbo frames are enabled, check that the maximum RX packet
1326 * length is supported by the configured device.
1327 */
 1328 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1329 if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
1330 RTE_ETHDEV_LOG(ERR,
1331 "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
1332 port_id, dev_conf->rxmode.max_rx_pkt_len,
1333 dev_info.max_rx_pktlen);
1334 ret = -EINVAL;
1335 goto rollback;
 1336 } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
1337 RTE_ETHDEV_LOG(ERR,
1338 "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
1339 port_id, dev_conf->rxmode.max_rx_pkt_len,
 1340 (unsigned int)RTE_ETHER_MIN_LEN);
1341 ret = -EINVAL;
1342 goto rollback;
1343 }
1344 } else {
1345 if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
1346 dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
1347 /* Use default value */
1348 dev->data->dev_conf.rxmode.max_rx_pkt_len =
1349 RTE_ETHER_MAX_LEN;
1350 }
1351
1352 /*
1353 * If LRO is enabled, check that the maximum aggregated packet
1354 * size is supported by the configured device.
1355 */
1356 if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1357 if (dev_conf->rxmode.max_lro_pkt_size == 0)
1358 dev->data->dev_conf.rxmode.max_lro_pkt_size =
1359 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1360 ret = check_lro_pkt_size(port_id,
1361 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1362 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1363 dev_info.max_lro_pkt_size);
1364 if (ret != 0)
1365 goto rollback;
1366 }
1367
1368 /* Any requested offloading must be within its device capabilities */
1369 if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
1370 dev_conf->rxmode.offloads) {
1371 RTE_ETHDEV_LOG(ERR,
1372 "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
1373 "capabilities 0x%"PRIx64" in %s()\n",
 1374 port_id, dev_conf->rxmode.offloads,
1375 dev_info.rx_offload_capa,
1376 __func__);
1377 ret = -EINVAL;
1378 goto rollback;
 1379 }
1380 if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
1381 dev_conf->txmode.offloads) {
1382 RTE_ETHDEV_LOG(ERR,
1383 "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
1384 "capabilities 0x%"PRIx64" in %s()\n",
 1385 port_id, dev_conf->txmode.offloads,
1386 dev_info.tx_offload_capa,
1387 __func__);
1388 ret = -EINVAL;
1389 goto rollback;
1390 }
1391
1392 dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
1393 rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
1394
1395 /* Check that device supports requested rss hash functions. */
1396 if ((dev_info.flow_type_rss_offloads |
1397 dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
1398 dev_info.flow_type_rss_offloads) {
1399 RTE_ETHDEV_LOG(ERR,
1400 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
1401 port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
1402 dev_info.flow_type_rss_offloads);
1403 ret = -EINVAL;
1404 goto rollback;
1405 }
1406
1407 /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
1408 if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
1409 (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
1410 RTE_ETHDEV_LOG(ERR,
1411 "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
1412 port_id,
1413 rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
1414 ret = -EINVAL;
1415 goto rollback;
1416 }
1417
1418 /*
1419 * Setup new number of RX/TX queues and reconfigure device.
1420 */
1421 diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1422 if (diag != 0) {
1423 RTE_ETHDEV_LOG(ERR,
1424 "Port%u rte_eth_dev_rx_queue_config = %d\n",
1425 port_id, diag);
1426 ret = diag;
1427 goto rollback;
1428 }
1429
1430 diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1431 if (diag != 0) {
1432 RTE_ETHDEV_LOG(ERR,
1433 "Port%u rte_eth_dev_tx_queue_config = %d\n",
1434 port_id, diag);
1435 rte_eth_dev_rx_queue_config(dev, 0);
1436 ret = diag;
1437 goto rollback;
1438 }
1439
1440 diag = (*dev->dev_ops->dev_configure)(dev);
1441 if (diag != 0) {
1442 RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
1443 port_id, diag);
 1444 ret = eth_err(port_id, diag);
 1445 goto reset_queues;
1446 }
1447
1448 /* Initialize Rx profiling if enabled at compilation time. */
 1449 diag = __rte_eth_dev_profile_init(port_id, dev);
 1450 if (diag != 0) {
 1451 RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
 1452 port_id, diag);
 1453 ret = eth_err(port_id, diag);
 1454 goto reset_queues;
1455 }
1456
1457 /* Validate Rx offloads. */
1458 diag = validate_offloads(port_id,
1459 dev_conf->rxmode.offloads,
1460 dev->data->dev_conf.rxmode.offloads, "Rx",
1461 rte_eth_dev_rx_offload_name);
1462 if (diag != 0) {
1463 ret = diag;
1464 goto reset_queues;
1465 }
1466
1467 /* Validate Tx offloads. */
1468 diag = validate_offloads(port_id,
1469 dev_conf->txmode.offloads,
1470 dev->data->dev_conf.txmode.offloads, "Tx",
1471 rte_eth_dev_tx_offload_name);
1472 if (diag != 0) {
1473 ret = diag;
1474 goto reset_queues;
1475 }
 1476
1477 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, 0);
1478 return 0;
1479reset_queues:
1480 rte_eth_dev_rx_queue_config(dev, 0);
1481 rte_eth_dev_tx_queue_config(dev, 0);
1482rollback:
1483 memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
1484
 1485 rte_ethdev_trace_configure(port_id, nb_rx_q, nb_tx_q, dev_conf, ret);
 1486 return ret;
1487}
1488
1489void
1490_rte_eth_dev_reset(struct rte_eth_dev *dev)
1491{
1492 if (dev->data->dev_started) {
1493 RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
1494 dev->data->port_id);
1495 return;
1496 }
1497
1498 rte_eth_dev_rx_queue_config(dev, 0);
1499 rte_eth_dev_tx_queue_config(dev, 0);
1500
1501 memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1502}
1503
1504static void
1505rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
1506 struct rte_eth_dev_info *dev_info)
 1507{
 1508 struct rte_ether_addr *addr;
1509 uint16_t i;
1510 uint32_t pool = 0;
1511 uint64_t pool_mask;
1512
1513 /* replay MAC address configuration including default MAC */
1514 addr = &dev->data->mac_addrs[0];
1515 if (*dev->dev_ops->mac_addr_set != NULL)
1516 (*dev->dev_ops->mac_addr_set)(dev, addr);
1517 else if (*dev->dev_ops->mac_addr_add != NULL)
1518 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1519
1520 if (*dev->dev_ops->mac_addr_add != NULL) {
 1521 for (i = 1; i < dev_info->max_mac_addrs; i++) {
1522 addr = &dev->data->mac_addrs[i];
1523
1524 /* skip zero address */
 1525 if (rte_is_zero_ether_addr(addr))
1526 continue;
1527
1528 pool = 0;
1529 pool_mask = dev->data->mac_pool_sel[i];
1530
1531 do {
1532 if (pool_mask & 1ULL)
1533 (*dev->dev_ops->mac_addr_add)(dev,
1534 addr, i, pool);
1535 pool_mask >>= 1;
1536 pool++;
1537 } while (pool_mask);
1538 }
1539 }
1540}
1541
 1542static int
1543rte_eth_dev_config_restore(struct rte_eth_dev *dev,
1544 struct rte_eth_dev_info *dev_info, uint16_t port_id)
1545{
1546 int ret;
1547
1548 if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
1549 rte_eth_dev_mac_restore(dev, dev_info);
1550
1551 /* replay promiscuous configuration */
1552 /*
1553 * use callbacks directly since we don't need port_id check and
1554 * would like to bypass the same value set
1555 */
1556 if (rte_eth_promiscuous_get(port_id) == 1 &&
1557 *dev->dev_ops->promiscuous_enable != NULL) {
1558 ret = eth_err(port_id,
1559 (*dev->dev_ops->promiscuous_enable)(dev));
1560 if (ret != 0 && ret != -ENOTSUP) {
1561 RTE_ETHDEV_LOG(ERR,
1562 "Failed to enable promiscuous mode for device (port %u): %s\n",
1563 port_id, rte_strerror(-ret));
1564 return ret;
1565 }
1566 } else if (rte_eth_promiscuous_get(port_id) == 0 &&
1567 *dev->dev_ops->promiscuous_disable != NULL) {
1568 ret = eth_err(port_id,
1569 (*dev->dev_ops->promiscuous_disable)(dev));
1570 if (ret != 0 && ret != -ENOTSUP) {
1571 RTE_ETHDEV_LOG(ERR,
1572 "Failed to disable promiscuous mode for device (port %u): %s\n",
1573 port_id, rte_strerror(-ret));
1574 return ret;
1575 }
1576 }
1577
1578 /* replay all multicast configuration */
1579 /*
1580 * use callbacks directly since we don't need port_id check and
1581 * would like to bypass the same value set
1582 */
1583 if (rte_eth_allmulticast_get(port_id) == 1 &&
1584 *dev->dev_ops->allmulticast_enable != NULL) {
1585 ret = eth_err(port_id,
1586 (*dev->dev_ops->allmulticast_enable)(dev));
1587 if (ret != 0 && ret != -ENOTSUP) {
1588 RTE_ETHDEV_LOG(ERR,
1589 "Failed to enable allmulticast mode for device (port %u): %s\n",
1590 port_id, rte_strerror(-ret));
1591 return ret;
1592 }
1593 } else if (rte_eth_allmulticast_get(port_id) == 0 &&
1594 *dev->dev_ops->allmulticast_disable != NULL) {
1595 ret = eth_err(port_id,
1596 (*dev->dev_ops->allmulticast_disable)(dev));
1597 if (ret != 0 && ret != -ENOTSUP) {
1598 RTE_ETHDEV_LOG(ERR,
1599 "Failed to disable allmulticast mode for device (port %u): %s\n",
1600 port_id, rte_strerror(-ret));
1601 return ret;
1602 }
1603 }
1604
1605 return 0;
1606}
1607
1608int
1609rte_eth_dev_start(uint16_t port_id)
1610{
1611 struct rte_eth_dev *dev;
 1612 struct rte_eth_dev_info dev_info;
 1613 int diag;
 1614 int ret;
1615
1616 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1617
1618 dev = &rte_eth_devices[port_id];
1619
1620 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1621
1622 if (dev->data->dev_started != 0) {
1623 RTE_ETHDEV_LOG(INFO,
1624 "Device with port_id=%"PRIu16" already started\n",
1625 port_id);
1626 return 0;
1627 }
1628
1629 ret = rte_eth_dev_info_get(port_id, &dev_info);
1630 if (ret != 0)
1631 return ret;
1632
1633 /* Lets restore MAC now if device does not support live change */
1634 if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
1635 rte_eth_dev_mac_restore(dev, &dev_info);
1636
1637 diag = (*dev->dev_ops->dev_start)(dev);
1638 if (diag == 0)
1639 dev->data->dev_started = 1;
1640 else
1641 return eth_err(port_id, diag);
1642
1643 ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
1644 if (ret != 0) {
1645 RTE_ETHDEV_LOG(ERR,
1646 "Error during restoring configuration for device (port %u): %s\n",
1647 port_id, rte_strerror(-ret));
1648 rte_eth_dev_stop(port_id);
1649 return ret;
1650 }
1651
1652 if (dev->data->dev_conf.intr_conf.lsc == 0) {
1653 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1654 (*dev->dev_ops->link_update)(dev, 0);
1655 }
1656
1657 rte_ethdev_trace_start(port_id);
1658 return 0;
1659}
1660
1661void
1662rte_eth_dev_stop(uint16_t port_id)
1663{
1664 struct rte_eth_dev *dev;
1665
1666 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1667 dev = &rte_eth_devices[port_id];
1668
1669 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1670
1671 if (dev->data->dev_started == 0) {
1672 RTE_ETHDEV_LOG(INFO,
1673 "Device with port_id=%"PRIu16" already stopped\n",
1674 port_id);
1675 return;
1676 }
1677
1678 dev->data->dev_started = 0;
1679 (*dev->dev_ops->dev_stop)(dev);
 1680 rte_ethdev_trace_stop(port_id);
1681}
1682
1683int
1684rte_eth_dev_set_link_up(uint16_t port_id)
1685{
1686 struct rte_eth_dev *dev;
1687
1688 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1689
1690 dev = &rte_eth_devices[port_id];
1691
1692 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1693 return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1694}
1695
1696int
1697rte_eth_dev_set_link_down(uint16_t port_id)
1698{
1699 struct rte_eth_dev *dev;
1700
1701 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1702
1703 dev = &rte_eth_devices[port_id];
1704
1705 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1706 return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1707}
1708
1709void
1710rte_eth_dev_close(uint16_t port_id)
1711{
1712 struct rte_eth_dev *dev;
1713
1714 RTE_ETH_VALID_PORTID_OR_RET(port_id);
1715 dev = &rte_eth_devices[port_id];
1716
1717 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1718 dev->data->dev_started = 0;
1719 (*dev->dev_ops->dev_close)(dev);
1720
 1721 rte_ethdev_trace_close(port_id);
1722 /* check behaviour flag - temporary for PMD migration */
1723 if ((dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE) != 0) {
1724 /* new behaviour: send event + reset state + free all data */
1725 rte_eth_dev_release_port(dev);
1726 return;
1727 }
1728 RTE_ETHDEV_LOG(DEBUG, "Port closing is using an old behaviour.\n"
1729 "The driver %s should migrate to the new behaviour.\n",
1730 dev->device->driver->name);
1731 /* old behaviour: only free queue arrays */
1732 dev->data->nb_rx_queues = 0;
1733 rte_free(dev->data->rx_queues);
1734 dev->data->rx_queues = NULL;
1735 dev->data->nb_tx_queues = 0;
1736 rte_free(dev->data->tx_queues);
1737 dev->data->tx_queues = NULL;
1738}
1739
1740int
1741rte_eth_dev_reset(uint16_t port_id)
1742{
1743 struct rte_eth_dev *dev;
1744 int ret;
1745
1746 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1747 dev = &rte_eth_devices[port_id];
1748
1749 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1750
1751 rte_eth_dev_stop(port_id);
1752 ret = dev->dev_ops->dev_reset(dev);
1753
1754 return eth_err(port_id, ret);
1755}
1756
 1757int
1758rte_eth_dev_is_removed(uint16_t port_id)
1759{
1760 struct rte_eth_dev *dev;
1761 int ret;
1762
1763 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1764
1765 dev = &rte_eth_devices[port_id];
1766
1767 if (dev->state == RTE_ETH_DEV_REMOVED)
1768 return 1;
1769
1770 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1771
1772 ret = dev->dev_ops->is_removed(dev);
1773 if (ret != 0)
1774 /* Device is physically removed. */
1775 dev->state = RTE_ETH_DEV_REMOVED;
1776
1777 return ret;
1778}
1779
1780int
1781rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1782 uint16_t nb_rx_desc, unsigned int socket_id,
1783 const struct rte_eth_rxconf *rx_conf,
1784 struct rte_mempool *mp)
1785{
1786 int ret;
1787 uint32_t mbp_buf_size;
1788 struct rte_eth_dev *dev;
1789 struct rte_eth_dev_info dev_info;
1790 struct rte_eth_rxconf local_conf;
1791 void **rxq;
1792
1793 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1794
1795 dev = &rte_eth_devices[port_id];
1796 if (rx_queue_id >= dev->data->nb_rx_queues) {
1797 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1798 return -EINVAL;
1799 }
1800
1801 if (mp == NULL) {
1802 RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
1803 return -EINVAL;
1804 }
1805
1806 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1807
1808 /*
1809 * Check the size of the mbuf data buffer.
1810 * This value must be provided in the private data of the memory pool.
1811 * First check that the memory pool has a valid private data.
1812 */
1813 ret = rte_eth_dev_info_get(port_id, &dev_info);
1814 if (ret != 0)
1815 return ret;
1816
1817 if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1818 RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
1819 mp->name, (int)mp->private_data_size,
1820 (int)sizeof(struct rte_pktmbuf_pool_private));
1821 return -ENOSPC;
1822 }
1823 mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1824
1825 if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1826 RTE_ETHDEV_LOG(ERR,
1827 "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
1828 mp->name, (int)mbp_buf_size,
1829 (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
1830 (int)RTE_PKTMBUF_HEADROOM,
1831 (int)dev_info.min_rx_bufsize);
1832 return -EINVAL;
1833 }
1834
1835 /* Use default specified by driver, if nb_rx_desc is zero */
1836 if (nb_rx_desc == 0) {
1837 nb_rx_desc = dev_info.default_rxportconf.ring_size;
1838 /* If driver default is also zero, fall back on EAL default */
1839 if (nb_rx_desc == 0)
1840 nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
1841 }
1842
1843 if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1844 nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1845 nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1846
1847 RTE_ETHDEV_LOG(ERR,
 1848 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
1849 nb_rx_desc, dev_info.rx_desc_lim.nb_max,
1850 dev_info.rx_desc_lim.nb_min,
1851 dev_info.rx_desc_lim.nb_align);
1852 return -EINVAL;
1853 }
1854
1855 if (dev->data->dev_started &&
1856 !(dev_info.dev_capa &
1857 RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
1858 return -EBUSY;
1859
1860 if (dev->data->dev_started &&
1861 (dev->data->rx_queue_state[rx_queue_id] !=
1862 RTE_ETH_QUEUE_STATE_STOPPED))
1863 return -EBUSY;
1864
1865 rxq = dev->data->rx_queues;
1866 if (rxq[rx_queue_id]) {
1867 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1868 -ENOTSUP);
1869 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1870 rxq[rx_queue_id] = NULL;
1871 }
1872
1873 if (rx_conf == NULL)
1874 rx_conf = &dev_info.default_rxconf;
1875
1876 local_conf = *rx_conf;
1877
1878 /*
1879 * If an offloading has already been enabled in
1880 * rte_eth_dev_configure(), it has been enabled on all queues,
1881 * so there is no need to enable it in this queue again.
1882 * The local_conf.offloads input to underlying PMD only carries
1883 * those offloadings which are only enabled on this queue and
1884 * not enabled on all queues.
1885 */
1886 local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
1887
1888 /*
1889 * Offloads newly requested for this queue are those that were not
1890 * enabled in rte_eth_dev_configure(), and they must be per-queue
1891 * offloads. A pure per-port offload can't be enabled on a queue
1892 * while it is disabled on another queue, and it can't be newly
1893 * enabled for an individual queue if it wasn't already enabled in
1894 * rte_eth_dev_configure().
1895 */
1896 if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
1897 local_conf.offloads) {
1898 RTE_ETHDEV_LOG(ERR,
1899 "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
1900 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
1901 port_id, rx_queue_id, local_conf.offloads,
1902 dev_info.rx_queue_offload_capa,
1903 __func__);
1904 return -EINVAL;
1905 }
1906
1907 /*
1908 * If LRO is enabled, check that the maximum aggregated packet
1909 * size is supported by the configured device.
1910 */
1911 if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
1912 if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
1913 dev->data->dev_conf.rxmode.max_lro_pkt_size =
1914 dev->data->dev_conf.rxmode.max_rx_pkt_len;
1915 int ret = check_lro_pkt_size(port_id,
1916 dev->data->dev_conf.rxmode.max_lro_pkt_size,
1917 dev->data->dev_conf.rxmode.max_rx_pkt_len,
1918 dev_info.max_lro_pkt_size);
1919 if (ret != 0)
1920 return ret;
1921 }
1922
1923 ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1924 socket_id, &local_conf, mp);
1925 if (!ret) {
1926 if (!dev->data->min_rx_buf_size ||
1927 dev->data->min_rx_buf_size > mbp_buf_size)
1928 dev->data->min_rx_buf_size = mbp_buf_size;
1929 }
1930
1931 rte_ethdev_trace_rxq_setup(port_id, rx_queue_id, nb_rx_desc, mp,
1932 rx_conf, ret);
1933 return eth_err(port_id, ret);
1934}
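
/*
 * Example (an editor's sketch, not part of the upstream file): the usual
 * caller-side pattern for rte_eth_rx_queue_setup().  The pool name, ring
 * size and queue id below are hypothetical; rte_eth_dev_configure() is
 * assumed to have been called for the port already, and passing a NULL
 * rx_conf selects the driver's default_rxconf as handled above.
 */
static int
example_rx_queue_setup(uint16_t port_id)
{
	struct rte_mempool *mb_pool;
	int ret;

	/* 8191 mbufs, 256 cached per lcore, default 2 KB + headroom buffers */
	mb_pool = rte_pktmbuf_pool_create("example_rx_pool", 8191, 256, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_eth_dev_socket_id(port_id));
	if (mb_pool == NULL)
		return -rte_errno;

	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
			rte_eth_dev_socket_id(port_id), NULL, mb_pool);
	if (ret < 0)
		RTE_ETHDEV_LOG(ERR, "Rx queue setup failed: %d\n", ret);
	return ret;
}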
1935
1936int
1937rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1938 uint16_t nb_rx_desc,
1939 const struct rte_eth_hairpin_conf *conf)
1940{
1941 int ret;
1942 struct rte_eth_dev *dev;
1943 struct rte_eth_hairpin_cap cap;
1944 void **rxq;
1945 int i;
1946 int count;
1947
1948 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1949
1950 dev = &rte_eth_devices[port_id];
1951 if (rx_queue_id >= dev->data->nb_rx_queues) {
1952 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
1953 return -EINVAL;
1954 }
1955 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
1956 if (ret != 0)
1957 return ret;
1958 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
1959 -ENOTSUP);
1960 /* if nb_rx_desc is zero use max number of desc from the driver. */
1961 if (nb_rx_desc == 0)
1962 nb_rx_desc = cap.max_nb_desc;
1963 if (nb_rx_desc > cap.max_nb_desc) {
1964 RTE_ETHDEV_LOG(ERR,
1965 "Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
1966 nb_rx_desc, cap.max_nb_desc);
1967 return -EINVAL;
1968 }
1969 if (conf->peer_count > cap.max_rx_2_tx) {
1970 RTE_ETHDEV_LOG(ERR,
1971 "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu",
1972 conf->peer_count, cap.max_rx_2_tx);
1973 return -EINVAL;
1974 }
1975 if (conf->peer_count == 0) {
1976 RTE_ETHDEV_LOG(ERR,
1977 "Invalid value for number of peers for Rx queue(=%hu), should be: > 0",
1978 conf->peer_count);
1979 return -EINVAL;
1980 }
1981 for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
1982 cap.max_nb_queues != UINT16_MAX; i++) {
1983 if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
1984 count++;
1985 }
1986 if (count > cap.max_nb_queues) {
1987 RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d",
1988 cap.max_nb_queues);
1989 return -EINVAL;
1990 }
1991 if (dev->data->dev_started)
1992 return -EBUSY;
1993 rxq = dev->data->rx_queues;
1994 if (rxq[rx_queue_id] != NULL) {
1995 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1996 -ENOTSUP);
1997 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1998 rxq[rx_queue_id] = NULL;
1999 }
2000 ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
2001 nb_rx_desc, conf);
2002 if (ret == 0)
2003 dev->data->rx_queue_state[rx_queue_id] =
2004 RTE_ETH_QUEUE_STATE_HAIRPIN;
2005 return eth_err(port_id, ret);
2006}
2007
2008int
2009rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2010 uint16_t nb_tx_desc, unsigned int socket_id,
2011 const struct rte_eth_txconf *tx_conf)
2012{
2013 struct rte_eth_dev *dev;
2014 struct rte_eth_dev_info dev_info;
2015 struct rte_eth_txconf local_conf;
2016 void **txq;
2017 int ret;
2018
2019 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2020
2021 dev = &rte_eth_devices[port_id];
2022 if (tx_queue_id >= dev->data->nb_tx_queues) {
2023 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2024 return -EINVAL;
2025 }
2026
2027 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
2028
2029 ret = rte_eth_dev_info_get(port_id, &dev_info);
2030 if (ret != 0)
2031 return ret;
2032
2033 /* Use default specified by driver, if nb_tx_desc is zero */
2034 if (nb_tx_desc == 0) {
2035 nb_tx_desc = dev_info.default_txportconf.ring_size;
2036 /* If driver default is zero, fall back on EAL default */
2037 if (nb_tx_desc == 0)
2038 nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
2039 }
2040 if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
2041 nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
2042 nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
2043 RTE_ETHDEV_LOG(ERR,
2044 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
2045 nb_tx_desc, dev_info.tx_desc_lim.nb_max,
2046 dev_info.tx_desc_lim.nb_min,
2047 dev_info.tx_desc_lim.nb_align);
2048 return -EINVAL;
2049 }
2050
2051 if (dev->data->dev_started &&
2052 !(dev_info.dev_capa &
2053 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
2054 return -EBUSY;
2055
2056 if (dev->data->dev_started &&
2057 (dev->data->tx_queue_state[tx_queue_id] !=
2058 RTE_ETH_QUEUE_STATE_STOPPED))
2059 return -EBUSY;
2060
2061 txq = dev->data->tx_queues;
2062 if (txq[tx_queue_id]) {
2063 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2064 -ENOTSUP);
2065 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2066 txq[tx_queue_id] = NULL;
2067 }
2068
2069 if (tx_conf == NULL)
2070 tx_conf = &dev_info.default_txconf;
2071
2072 local_conf = *tx_conf;
2073
2074 /*
2075 * If an offloading has already been enabled in
2076 * rte_eth_dev_configure(), it has been enabled on all queues,
2077 * so there is no need to enable it in this queue again.
2078 * The local_conf.offloads input to underlying PMD only carries
2079 * those offloadings which are only enabled on this queue and
2080 * not enabled on all queues.
2081 */
2082 local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
2083
2084 /*
2085 * Offloads newly requested for this queue are those that were not
2086 * enabled in rte_eth_dev_configure(), and they must be per-queue
2087 * offloads. A pure per-port offload can't be enabled on a queue
2088 * while it is disabled on another queue, and it can't be newly
2089 * enabled for an individual queue if it wasn't already enabled in
2090 * rte_eth_dev_configure().
2091 */
2092 if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
2093 local_conf.offloads) {
2094 RTE_ETHDEV_LOG(ERR,
2095 "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
2096 "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
2097 port_id, tx_queue_id, local_conf.offloads,
2098 dev_info.tx_queue_offload_capa,
2099 __func__);
2100 return -EINVAL;
2101 }
2102
2103 rte_ethdev_trace_txq_setup(port_id, tx_queue_id, nb_tx_desc, tx_conf);
2104 return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
2105 tx_queue_id, nb_tx_desc, socket_id, &local_conf));
2106}
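
/*
 * Example (editor's sketch, not upstream code): the matching Tx side.  Most
 * applications copy the driver's default_txconf from rte_eth_dev_info_get()
 * and merge in the per-port Tx offloads chosen at configure time; the ring
 * size and queue id are hypothetical.
 */
static int
example_tx_queue_setup(uint16_t port_id, uint64_t tx_offloads)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	txconf = dev_info.default_txconf;
	txconf.offloads = tx_offloads;	/* same value given to dev_configure */

	return rte_eth_tx_queue_setup(port_id, 0, 1024,
			rte_eth_dev_socket_id(port_id), &txconf);
}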
2107
2108int
2109rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
2110 uint16_t nb_tx_desc,
2111 const struct rte_eth_hairpin_conf *conf)
2112{
2113 struct rte_eth_dev *dev;
2114 struct rte_eth_hairpin_cap cap;
2115 void **txq;
2116 int i;
2117 int count;
2118 int ret;
2119
2120 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2121 dev = &rte_eth_devices[port_id];
2122 if (tx_queue_id >= dev->data->nb_tx_queues) {
2123 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
2124 return -EINVAL;
2125 }
2126 ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
2127 if (ret != 0)
2128 return ret;
2129 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
2130 -ENOTSUP);
2131 /* if nb_tx_desc is zero use max number of desc from the driver. */
2132 if (nb_tx_desc == 0)
2133 nb_tx_desc = cap.max_nb_desc;
2134 if (nb_tx_desc > cap.max_nb_desc) {
2135 RTE_ETHDEV_LOG(ERR,
2136 "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
2137 nb_tx_desc, cap.max_nb_desc);
2138 return -EINVAL;
2139 }
2140 if (conf->peer_count > cap.max_tx_2_rx) {
2141 RTE_ETHDEV_LOG(ERR,
2142 "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu",
2143 conf->peer_count, cap.max_tx_2_rx);
2144 return -EINVAL;
2145 }
2146 if (conf->peer_count == 0) {
2147 RTE_ETHDEV_LOG(ERR,
2148 "Invalid value for number of peers for Tx queue(=%hu), should be: > 0",
2149 conf->peer_count);
2150 return -EINVAL;
2151 }
2152 for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
2153 cap.max_nb_queues != UINT16_MAX; i++) {
2154 if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
2155 count++;
2156 }
2157 if (count > cap.max_nb_queues) {
2158 RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d",
2159 cap.max_nb_queues);
2160 return -EINVAL;
2161 }
2162 if (dev->data->dev_started)
2163 return -EBUSY;
2164 txq = dev->data->tx_queues;
2165 if (txq[tx_queue_id] != NULL) {
2166 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
2167 -ENOTSUP);
2168 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
2169 txq[tx_queue_id] = NULL;
2170 }
2171 ret = (*dev->dev_ops->tx_hairpin_queue_setup)
2172 (dev, tx_queue_id, nb_tx_desc, conf);
2173 if (ret == 0)
2174 dev->data->tx_queue_state[tx_queue_id] =
2175 RTE_ETH_QUEUE_STATE_HAIRPIN;
2176 return eth_err(port_id, ret);
2177}
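
/*
 * Example (editor's sketch, not upstream code): binding Rx queue 1 and Tx
 * queue 1 of the same port as a hairpin pair.  This assumes the port was
 * configured with one extra queue in each direction and that the
 * struct rte_eth_hairpin_conf layout of this release (peer_count plus a
 * peers[] array of port/queue pairs) is what the application fills in; all
 * ids are hypothetical.
 */
static int
example_hairpin_setup(uint16_t port_id)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
	int ret;

	conf.peers[0].port = port_id;
	conf.peers[0].queue = 1;	/* peer Tx queue of the Rx side */
	ret = rte_eth_rx_hairpin_queue_setup(port_id, 1, 0, &conf);
	if (ret != 0)
		return ret;

	conf.peers[0].queue = 1;	/* peer Rx queue of the Tx side */
	return rte_eth_tx_hairpin_queue_setup(port_id, 1, 0, &conf);
}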
2178
2179void
2180rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
2181 void *userdata __rte_unused)
2182{
2183 unsigned i;
2184
2185 for (i = 0; i < unsent; i++)
2186 rte_pktmbuf_free(pkts[i]);
2187}
2188
2189void
2190rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
2191 void *userdata)
2192{
2193 uint64_t *count = userdata;
2194 unsigned i;
2195
2196 for (i = 0; i < unsent; i++)
2197 rte_pktmbuf_free(pkts[i]);
2198
2199 *count += unsent;
2200}
2201
2202int
2203rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
2204 buffer_tx_error_fn cbfn, void *userdata)
2205{
2206 buffer->error_callback = cbfn;
2207 buffer->error_userdata = userdata;
2208 return 0;
2209}
2210
2211int
2212rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
2213{
2214 int ret = 0;
2215
2216 if (buffer == NULL)
2217 return -EINVAL;
2218
2219 buffer->size = size;
2220 if (buffer->error_callback == NULL) {
2221 ret = rte_eth_tx_buffer_set_err_callback(
2222 buffer, rte_eth_tx_buffer_drop_callback, NULL);
2223 }
2224
2225 return ret;
2226}
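
/*
 * Example (editor's sketch, not upstream code): typical use of the tx-buffer
 * helpers above.  A buffer sized for 32 packets is allocated once, failed
 * transmissions are counted via rte_eth_tx_buffer_count_callback(), and the
 * remainder is flushed at the end of a burst.  Queue id and size are
 * hypothetical.
 */
static uint64_t example_tx_dropped;

static struct rte_eth_dev_tx_buffer *
example_tx_buffer_create(uint16_t port_id)
{
	struct rte_eth_dev_tx_buffer *buffer;

	buffer = rte_zmalloc_socket("example_tx_buffer",
			RTE_ETH_TX_BUFFER_SIZE(32), 0,
			rte_eth_dev_socket_id(port_id));
	if (buffer == NULL)
		return NULL;

	rte_eth_tx_buffer_init(buffer, 32);
	rte_eth_tx_buffer_set_err_callback(buffer,
			rte_eth_tx_buffer_count_callback, &example_tx_dropped);
	return buffer;
}

/* In the datapath: rte_eth_tx_buffer(port_id, 0, buffer, mbuf) queues or
 * sends each packet, and rte_eth_tx_buffer_flush(port_id, 0, buffer) pushes
 * whatever is still pending. */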
2227
2228int
2229rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
2230{
2231 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2232 int ret;
2233
2234 /* Validate Input Data. Bail if not valid or not supported. */
2235 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2236 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
2237
2238 /* Call driver to free pending mbufs. */
2239 ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
2240 free_cnt);
2241 return eth_err(port_id, ret);
2242}
2243
2244int
2245rte_eth_promiscuous_enable(uint16_t port_id)
2246{
2247 struct rte_eth_dev *dev;
2248 int diag = 0;
2249
2250 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2251 dev = &rte_eth_devices[port_id];
2252
2253 if (dev->data->promiscuous == 1)
2254 return 0;
2255
2256 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
2257
2258 diag = (*dev->dev_ops->promiscuous_enable)(dev);
2259 dev->data->promiscuous = (diag == 0) ? 1 : 0;
2260
2261 return eth_err(port_id, diag);
2262}
2263
2264int
2265rte_eth_promiscuous_disable(uint16_t port_id)
2266{
2267 struct rte_eth_dev *dev;
2268 int diag = 0;
2269
2270 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2271 dev = &rte_eth_devices[port_id];
2272
2273 if (dev->data->promiscuous == 0)
2274 return 0;
2275
2276 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
2277
2278 dev->data->promiscuous = 0;
2279 diag = (*dev->dev_ops->promiscuous_disable)(dev);
2280 if (diag != 0)
2281 dev->data->promiscuous = 1;
2282
2283 return eth_err(port_id, diag);
2284}
2285
2286int
2287rte_eth_promiscuous_get(uint16_t port_id)
2288{
2289 struct rte_eth_dev *dev;
2290
2291 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2292
2293 dev = &rte_eth_devices[port_id];
2294 return dev->data->promiscuous;
2295}
2296
2297int
2298rte_eth_allmulticast_enable(uint16_t port_id)
2299{
2300 struct rte_eth_dev *dev;
2301 int diag;
2302
2303 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2304 dev = &rte_eth_devices[port_id];
2305
2306 if (dev->data->all_multicast == 1)
2307 return 0;
2308
2309 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
2310 diag = (*dev->dev_ops->allmulticast_enable)(dev);
2311 dev->data->all_multicast = (diag == 0) ? 1 : 0;
2312
2313 return eth_err(port_id, diag);
2314}
2315
2316int
2317rte_eth_allmulticast_disable(uint16_t port_id)
2318{
2319 struct rte_eth_dev *dev;
2320 int diag;
2321
2322 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2323 dev = &rte_eth_devices[port_id];
2324
2325 if (dev->data->all_multicast == 0)
2326 return 0;
2327
2328 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
2329 dev->data->all_multicast = 0;
2330 diag = (*dev->dev_ops->allmulticast_disable)(dev);
2331 if (diag != 0)
2332 dev->data->all_multicast = 1;
2333
2334 return eth_err(port_id, diag);
2335}
2336
2337int
2338rte_eth_allmulticast_get(uint16_t port_id)
2339{
2340 struct rte_eth_dev *dev;
2341
2342 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2343
2344 dev = &rte_eth_devices[port_id];
2345 return dev->data->all_multicast;
2346}
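
/*
 * Example (editor's sketch, not upstream code): in this version the
 * promiscuous/all-multicast setters return an error code, so callers should
 * check it instead of assuming the mode changed.
 */
static void
example_enable_promisc(uint16_t port_id)
{
	int ret = rte_eth_promiscuous_enable(port_id);

	if (ret != 0)
		RTE_ETHDEV_LOG(ERR, "Port %u: promiscuous enable failed (%d)\n",
			port_id, ret);
	else if (rte_eth_promiscuous_get(port_id) == 1)
		RTE_ETHDEV_LOG(INFO, "Port %u is now promiscuous\n", port_id);
}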
2347
2348int
2349rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
2350{
2351 struct rte_eth_dev *dev;
2352
2353 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2354 dev = &rte_eth_devices[port_id];
2355
2356 if (dev->data->dev_conf.intr_conf.lsc &&
2357 dev->data->dev_started)
2358 rte_eth_linkstatus_get(dev, eth_link);
2359 else {
2360 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2361 (*dev->dev_ops->link_update)(dev, 1);
2362 *eth_link = dev->data->dev_link;
2363 }
2364
2365 return 0;
2366}
2367
2368int
2369rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
2370{
2371 struct rte_eth_dev *dev;
2372
2373 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2374 dev = &rte_eth_devices[port_id];
2375
2376 if (dev->data->dev_conf.intr_conf.lsc &&
2377 dev->data->dev_started)
2378 rte_eth_linkstatus_get(dev, eth_link);
2379 else {
2380 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
2381 (*dev->dev_ops->link_update)(dev, 0);
2382 *eth_link = dev->data->dev_link;
2383 }
2384
2385 return 0;
2386}
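
/*
 * Example (editor's sketch, not upstream code): polling the link without
 * waiting.  rte_eth_link_get() behaves the same but lets the driver block
 * until the status settles.
 */
static void
example_print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	if (rte_eth_link_get_nowait(port_id, &link) != 0)
		return;

	if (link.link_status == ETH_LINK_UP)
		RTE_ETHDEV_LOG(INFO, "Port %u up, %u Mbps, %s-duplex\n",
			port_id, link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			"full" : "half");
	else
		RTE_ETHDEV_LOG(INFO, "Port %u down\n", port_id);
}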
2387
2388int
2389rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
2390{
2391 struct rte_eth_dev *dev;
2392
2393 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2394
2395 dev = &rte_eth_devices[port_id];
2396 memset(stats, 0, sizeof(*stats));
2397
2398 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
2399 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
2400 return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
2401}
2402
2403int
2404rte_eth_stats_reset(uint16_t port_id)
2405{
2406 struct rte_eth_dev *dev;
2407 int ret;
2408
2409 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2410 dev = &rte_eth_devices[port_id];
2411
2412 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
2413 ret = (*dev->dev_ops->stats_reset)(dev);
2414 if (ret != 0)
2415 return eth_err(port_id, ret);
2416
2417 dev->data->rx_mbuf_alloc_failed = 0;
2418
2419 return 0;
2420}
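
/*
 * Example (editor's sketch, not upstream code): reading and then clearing
 * the basic counters.  Only a few of the rte_eth_stats fields are shown.
 */
static void
example_dump_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	printf("port %u: rx %"PRIu64" tx %"PRIu64" missed %"PRIu64
	       " rx_nombuf %"PRIu64"\n",
	       port_id, stats.ipackets, stats.opackets,
	       stats.imissed, stats.rx_nombuf);

	(void)rte_eth_stats_reset(port_id);
}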
2421
2422static inline int
2423get_xstats_basic_count(struct rte_eth_dev *dev)
2424{
2425 uint16_t nb_rxqs, nb_txqs;
2426 int count;
2427
2428 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2429 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2430
2431 count = RTE_NB_STATS;
2432 count += nb_rxqs * RTE_NB_RXQ_STATS;
2433 count += nb_txqs * RTE_NB_TXQ_STATS;
2434
2435 return count;
2436}
2437
2438static int
2439get_xstats_count(uint16_t port_id)
2440{
2441 struct rte_eth_dev *dev;
2442 int count;
2443
2444 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2445 dev = &rte_eth_devices[port_id];
2446 if (dev->dev_ops->xstats_get_names_by_id != NULL) {
2447 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
2448 NULL, 0);
2449 if (count < 0)
2450 return eth_err(port_id, count);
2451 }
2452 if (dev->dev_ops->xstats_get_names != NULL) {
2453 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
2454 if (count < 0)
2455 return eth_err(port_id, count);
2456 } else
2457 count = 0;
2458
2459
2460 count += get_xstats_basic_count(dev);
2461
2462 return count;
2463}
2464
2465int
2466rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
2467 uint64_t *id)
2468{
2469 int cnt_xstats, idx_xstat;
2470
2471 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2472
2473 if (!id) {
2474 RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
2475 return -ENOMEM;
2476 }
2477
2478 if (!xstat_name) {
2479 RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
2480 return -ENOMEM;
2481 }
2482
2483 /* Get count */
2484 cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
2485 if (cnt_xstats < 0) {
2486 RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
2487 return -ENODEV;
2488 }
2489
2490 /* Get id-name lookup table */
2491 struct rte_eth_xstat_name xstats_names[cnt_xstats];
2492
2493 if (cnt_xstats != rte_eth_xstats_get_names_by_id(
2494 port_id, xstats_names, cnt_xstats, NULL)) {
2495 RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
2496 return -1;
2497 }
2498
2499 for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
2500 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
2501 *id = idx_xstat;
2502 return 0;
2503 };
2504 }
2505
2506 return -EINVAL;
2507}
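
/*
 * Example (editor's sketch, not upstream code): resolve one counter name to
 * its xstat id once, then fetch only that value with
 * rte_eth_xstats_get_by_id().  "rx_good_packets" is one of the basic stats
 * names registered at the top of this file.
 */
static int
example_read_one_xstat(uint16_t port_id, uint64_t *value)
{
	uint64_t id;
	int ret;

	ret = rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets", &id);
	if (ret != 0)
		return ret;

	ret = rte_eth_xstats_get_by_id(port_id, &id, value, 1);
	return ret == 1 ? 0 : -EIO;
}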
2508
2509/* retrieve basic stats names */
2510static int
2511rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
2512 struct rte_eth_xstat_name *xstats_names)
2513{
2514 int cnt_used_entries = 0;
2515 uint32_t idx, id_queue;
2516 uint16_t num_q;
2517
2518 for (idx = 0; idx < RTE_NB_STATS; idx++) {
2519 strlcpy(xstats_names[cnt_used_entries].name,
2520 rte_stats_strings[idx].name,
2521 sizeof(xstats_names[0].name));
2522 cnt_used_entries++;
2523 }
2524 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2525 for (id_queue = 0; id_queue < num_q; id_queue++) {
2526 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
2527 snprintf(xstats_names[cnt_used_entries].name,
2528 sizeof(xstats_names[0].name),
2529 "rx_q%u%s",
2530 id_queue, rte_rxq_stats_strings[idx].name);
2531 cnt_used_entries++;
2532 }
2533
2534 }
2535 num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2536 for (id_queue = 0; id_queue < num_q; id_queue++) {
2537 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
2538 snprintf(xstats_names[cnt_used_entries].name,
2539 sizeof(xstats_names[0].name),
2540 "tx_q%u%s",
2541 id_queue, rte_txq_stats_strings[idx].name);
2542 cnt_used_entries++;
2543 }
2544 }
2545 return cnt_used_entries;
2546}
2547
2548/* retrieve ethdev extended statistics names */
2549int
2550rte_eth_xstats_get_names_by_id(uint16_t port_id,
2551 struct rte_eth_xstat_name *xstats_names, unsigned int size,
2552 uint64_t *ids)
2553{
2554 struct rte_eth_xstat_name *xstats_names_copy;
2555 unsigned int no_basic_stat_requested = 1;
2556 unsigned int no_ext_stat_requested = 1;
2557 unsigned int expected_entries;
2558 unsigned int basic_count;
2559 struct rte_eth_dev *dev;
2560 unsigned int i;
2561 int ret;
2562
2563 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2564 dev = &rte_eth_devices[port_id];
2565
2566 basic_count = get_xstats_basic_count(dev);
2567 ret = get_xstats_count(port_id);
2568 if (ret < 0)
2569 return ret;
2570 expected_entries = (unsigned int)ret;
2571
2572 /* Return max number of stats if no ids given */
2573 if (!ids) {
2574 if (!xstats_names)
2575 return expected_entries;
2576 else if (xstats_names && size < expected_entries)
2577 return expected_entries;
2578 }
2579
2580 if (ids && !xstats_names)
2581 return -EINVAL;
2582
2583 if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2584 uint64_t ids_copy[size];
2585
2586 for (i = 0; i < size; i++) {
2587 if (ids[i] < basic_count) {
2588 no_basic_stat_requested = 0;
2589 break;
2590 }
2591
2592 /*
2593 * Convert ids to xstats ids that PMD knows.
2594 * ids known by user are basic + extended stats.
2595 */
2596 ids_copy[i] = ids[i] - basic_count;
2597 }
2598
2599 if (no_basic_stat_requested)
2600 return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2601 xstats_names, ids_copy, size);
2602 }
2603
2604 /* Retrieve all stats */
2605 if (!ids) {
2606 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2607 expected_entries);
2608 if (num_stats < 0 || num_stats > (int)expected_entries)
2609 return num_stats;
2610 else
2611 return expected_entries;
2612 }
2613
2614 xstats_names_copy = calloc(expected_entries,
2615 sizeof(struct rte_eth_xstat_name));
2616
2617 if (!xstats_names_copy) {
2618 RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
2619 return -ENOMEM;
2620 }
2621
2622 if (ids) {
2623 for (i = 0; i < size; i++) {
2624 if (ids[i] >= basic_count) {
2625 no_ext_stat_requested = 0;
2626 break;
2627 }
2628 }
2629 }
2630
2631 /* Fill xstats_names_copy structure */
2632 if (ids && no_ext_stat_requested) {
2633 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2634 } else {
2635 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2636 expected_entries);
2637 if (ret < 0) {
2638 free(xstats_names_copy);
2639 return ret;
2640 }
2641 }
2642
2643 /* Filter stats */
2644 for (i = 0; i < size; i++) {
2645 if (ids[i] >= expected_entries) {
2646 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2647 free(xstats_names_copy);
2648 return -1;
2649 }
2650 xstats_names[i] = xstats_names_copy[ids[i]];
2651 }
2652
2653 free(xstats_names_copy);
2654 return size;
2655}
2656
2657int
2658rte_eth_xstats_get_names(uint16_t port_id,
2659 struct rte_eth_xstat_name *xstats_names,
2660 unsigned int size)
2661{
2662 struct rte_eth_dev *dev;
2663 int cnt_used_entries;
2664 int cnt_expected_entries;
2665 int cnt_driver_entries;
2666
2667 cnt_expected_entries = get_xstats_count(port_id);
2668 if (xstats_names == NULL || cnt_expected_entries < 0 ||
2669 (int)size < cnt_expected_entries)
2670 return cnt_expected_entries;
2671
2672 /* port_id checked in get_xstats_count() */
2673 dev = &rte_eth_devices[port_id];
2674
2675 cnt_used_entries = rte_eth_basic_stats_get_names(
2676 dev, xstats_names);
2677
2678 if (dev->dev_ops->xstats_get_names != NULL) {
2679 /* If there are any driver-specific xstats, append them
2680 * to end of list.
2681 */
2682 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2683 dev,
2684 xstats_names + cnt_used_entries,
2685 size - cnt_used_entries);
2686 if (cnt_driver_entries < 0)
2687 return eth_err(port_id, cnt_driver_entries);
2688 cnt_used_entries += cnt_driver_entries;
2689 }
2690
2691 return cnt_used_entries;
2692}
2693
2694
2695static int
2696rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2697{
2698 struct rte_eth_dev *dev;
2699 struct rte_eth_stats eth_stats;
2700 unsigned int count = 0, i, q;
2701 uint64_t val, *stats_ptr;
2702 uint16_t nb_rxqs, nb_txqs;
2703 int ret;
2704
2705 ret = rte_eth_stats_get(port_id, &eth_stats);
2706 if (ret < 0)
2707 return ret;
2708
2709 dev = &rte_eth_devices[port_id];
2710
2711 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2712 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2713
2714 /* global stats */
2715 for (i = 0; i < RTE_NB_STATS; i++) {
2716 stats_ptr = RTE_PTR_ADD(&eth_stats,
2717 rte_stats_strings[i].offset);
2718 val = *stats_ptr;
2719 xstats[count++].value = val;
2720 }
2721
2722 /* per-rxq stats */
2723 for (q = 0; q < nb_rxqs; q++) {
2724 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2725 stats_ptr = RTE_PTR_ADD(&eth_stats,
2726 rte_rxq_stats_strings[i].offset +
2727 q * sizeof(uint64_t));
2728 val = *stats_ptr;
2729 xstats[count++].value = val;
2730 }
2731 }
2732
2733 /* per-txq stats */
2734 for (q = 0; q < nb_txqs; q++) {
2735 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2736 stats_ptr = RTE_PTR_ADD(&eth_stats,
2737 rte_txq_stats_strings[i].offset +
2738 q * sizeof(uint64_t));
2739 val = *stats_ptr;
2740 xstats[count++].value = val;
2741 }
2742 }
2743 return count;
2744}
2745
2746/* retrieve ethdev extended statistics */
2747int
2748rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2749 uint64_t *values, unsigned int size)
2750{
2751 unsigned int no_basic_stat_requested = 1;
2752 unsigned int no_ext_stat_requested = 1;
2753 unsigned int num_xstats_filled;
2754 unsigned int basic_count;
2755 uint16_t expected_entries;
2756 struct rte_eth_dev *dev;
2757 unsigned int i;
2758 int ret;
2759
2760 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2761 ret = get_xstats_count(port_id);
2762 if (ret < 0)
2763 return ret;
2764 expected_entries = (uint16_t)ret;
2765 struct rte_eth_xstat xstats[expected_entries];
2766 dev = &rte_eth_devices[port_id];
2767 basic_count = get_xstats_basic_count(dev);
2768
2769 /* Return max number of stats if no ids given */
2770 if (!ids) {
2771 if (!values)
2772 return expected_entries;
2773 else if (values && size < expected_entries)
2774 return expected_entries;
2775 }
2776
2777 if (ids && !values)
2778 return -EINVAL;
2779
2780 if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2781 unsigned int basic_count = get_xstats_basic_count(dev);
2782 uint64_t ids_copy[size];
2783
2784 for (i = 0; i < size; i++) {
2785 if (ids[i] < basic_count) {
2786 no_basic_stat_requested = 0;
2787 break;
2788 }
2789
2790 /*
2791 * Convert ids to xstats ids that PMD knows.
2792 * ids known by user are basic + extended stats.
2793 */
2794 ids_copy[i] = ids[i] - basic_count;
2795 }
2796
2797 if (no_basic_stat_requested)
2798 return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2799 values, size);
2800 }
2801
2802 if (ids) {
2803 for (i = 0; i < size; i++) {
2804 if (ids[i] >= basic_count) {
2805 no_ext_stat_requested = 0;
2806 break;
2807 }
2808 }
2809 }
2810
2811 /* Fill the xstats structure */
2812 if (ids && no_ext_stat_requested)
2813 ret = rte_eth_basic_stats_get(port_id, xstats);
2814 else
2815 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2816
2817 if (ret < 0)
2818 return ret;
2819 num_xstats_filled = (unsigned int)ret;
2820
2821 /* Return all stats */
2822 if (!ids) {
2823 for (i = 0; i < num_xstats_filled; i++)
2824 values[i] = xstats[i].value;
2825 return expected_entries;
2826 }
2827
2828 /* Filter stats */
2829 for (i = 0; i < size; i++) {
2830 if (ids[i] >= expected_entries) {
2831 RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
2832 return -1;
2833 }
2834 values[i] = xstats[ids[i]].value;
2835 }
2836 return size;
2837}
2838
2839int
2840rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2841 unsigned int n)
2842{
2843 struct rte_eth_dev *dev;
2844 unsigned int count = 0, i;
2845 signed int xcount = 0;
2846 uint16_t nb_rxqs, nb_txqs;
2847 int ret;
2848
2849 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2850
2851 dev = &rte_eth_devices[port_id];
2852
2853 nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2854 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2855
2856 /* Return generic statistics */
2857 count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2858 (nb_txqs * RTE_NB_TXQ_STATS);
2859
2860 /* implemented by the driver */
2861 if (dev->dev_ops->xstats_get != NULL) {
2862 /* Retrieve the xstats from the driver at the end of the
2863 * xstats struct.
2864 */
2865 xcount = (*dev->dev_ops->xstats_get)(dev,
2866 xstats ? xstats + count : NULL,
2867 (n > count) ? n - count : 0);
2868
2869 if (xcount < 0)
2870 return eth_err(port_id, xcount);
2871 }
2872
2873 if (n < count + xcount || xstats == NULL)
2874 return count + xcount;
2875
2876 /* now fill the xstats structure */
2877 ret = rte_eth_basic_stats_get(port_id, xstats);
2878 if (ret < 0)
2879 return ret;
2880 count = ret;
2881
2882 for (i = 0; i < count; i++)
2883 xstats[i].id = i;
2884 /* add an offset to driver-specific stats */
2885 for ( ; i < count + xcount; i++)
2886 xstats[i].id += count;
2887
2888 return count + xcount;
2889}
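
/*
 * Example (editor's sketch, not upstream code): the usual two-pass pattern
 * for the xstats API above - ask for the count with a NULL array, allocate,
 * then fetch names and values whose indexes correspond.
 */
static void
example_dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat_name *names = NULL;
	struct rte_eth_xstat *values = NULL;
	int i, n;

	n = rte_eth_xstats_get(port_id, NULL, 0);
	if (n <= 0)
		return;

	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names == NULL || values == NULL)
		goto out;

	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, values, n) != n)
		goto out;

	for (i = 0; i < n; i++)
		printf("%s: %"PRIu64"\n", names[i].name, values[i].value);
out:
	free(names);
	free(values);
}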
2890
2891/* reset ethdev extended statistics */
2892int
2893rte_eth_xstats_reset(uint16_t port_id)
2894{
2895 struct rte_eth_dev *dev;
2896
2897 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2898 dev = &rte_eth_devices[port_id];
2899
2900 /* implemented by the driver */
2901 if (dev->dev_ops->xstats_reset != NULL)
2902 return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
2903
2904 /* fallback to default */
2905 return rte_eth_stats_reset(port_id);
2906}
2907
2908static int
2909set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2910 uint8_t is_rx)
2911{
2912 struct rte_eth_dev *dev;
2913
2914 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2915
2916 dev = &rte_eth_devices[port_id];
2917
2918 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2919
2920 if (is_rx && (queue_id >= dev->data->nb_rx_queues))
2921 return -EINVAL;
2922
2923 if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
2924 return -EINVAL;
2925
2926 if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
2927 return -EINVAL;
2928
2929 return (*dev->dev_ops->queue_stats_mapping_set)
2930 (dev, queue_id, stat_idx, is_rx);
2931}
2932
2933
2934int
2935rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2936 uint8_t stat_idx)
2937{
2938 return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2939 stat_idx, STAT_QMAP_TX));
2940}
2941
2942
2943int
2944rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2945 uint8_t stat_idx)
2946{
2947 return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2948 stat_idx, STAT_QMAP_RX));
2949}
2950
2951int
2952rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2953{
2954 struct rte_eth_dev *dev;
2955
2956 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2957 dev = &rte_eth_devices[port_id];
2958
2959 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2960 return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2961 fw_version, fw_size));
2962}
2963
2964int
2965rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2966{
2967 struct rte_eth_dev *dev;
2968 const struct rte_eth_desc_lim lim = {
2969 .nb_max = UINT16_MAX,
2970 .nb_min = 0,
2971 .nb_align = 1,
2972 .nb_seg_max = UINT16_MAX,
2973 .nb_mtu_seg_max = UINT16_MAX,
2974 };
2975 int diag;
2976
2977 /*
2978 * Init dev_info before port_id check since caller does not have
2979 * return status and does not know if get is successful or not.
2980 */
2981 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2982 dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
2983
2984 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2985 dev = &rte_eth_devices[port_id];
2986
2987 dev_info->rx_desc_lim = lim;
2988 dev_info->tx_desc_lim = lim;
2989 dev_info->device = dev->device;
2990 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
2991 dev_info->max_mtu = UINT16_MAX;
2992
2993 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
2994 diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2995 if (diag != 0) {
2996 /* Cleanup already filled in device information */
2997 memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2998 return eth_err(port_id, diag);
2999 }
3000
3001 /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
3002 dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
3003 RTE_MAX_QUEUES_PER_PORT);
3004 dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
3005 RTE_MAX_QUEUES_PER_PORT);
3006
3007 dev_info->driver_name = dev->device->driver->name;
3008 dev_info->nb_rx_queues = dev->data->nb_rx_queues;
3009 dev_info->nb_tx_queues = dev->data->nb_tx_queues;
3010
3011 dev_info->dev_flags = &dev->data->dev_flags;
3012
3013 return 0;
3014}
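
/*
 * Example (editor's sketch, not upstream code): in this version
 * rte_eth_dev_info_get() returns an error code, so the result has to be
 * checked before any of the fields are trusted.
 */
static void
example_print_dev_info(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;

	printf("port %u: driver %s, max %u rx / %u tx queues, "
	       "rx offload capa 0x%"PRIx64"\n",
	       port_id, dev_info.driver_name,
	       dev_info.max_rx_queues, dev_info.max_tx_queues,
	       dev_info.rx_offload_capa);
}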
3015
3016int
3017rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
3018 uint32_t *ptypes, int num)
3019{
3020 int i, j;
3021 struct rte_eth_dev *dev;
3022 const uint32_t *all_ptypes;
3023
3024 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3025 dev = &rte_eth_devices[port_id];
3026 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
3027 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3028
3029 if (!all_ptypes)
3030 return 0;
3031
3032 for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
3033 if (all_ptypes[i] & ptype_mask) {
3034 if (j < num)
3035 ptypes[j] = all_ptypes[i];
3036 j++;
3037 }
3038
3039 return j;
3040}
3041
3042int
3043rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
3044 uint32_t *set_ptypes, unsigned int num)
3045{
3046 const uint32_t valid_ptype_masks[] = {
3047 RTE_PTYPE_L2_MASK,
3048 RTE_PTYPE_L3_MASK,
3049 RTE_PTYPE_L4_MASK,
3050 RTE_PTYPE_TUNNEL_MASK,
3051 RTE_PTYPE_INNER_L2_MASK,
3052 RTE_PTYPE_INNER_L3_MASK,
3053 RTE_PTYPE_INNER_L4_MASK,
3054 };
3055 const uint32_t *all_ptypes;
3056 struct rte_eth_dev *dev;
3057 uint32_t unused_mask;
3058 unsigned int i, j;
3059 int ret;
3060
3061 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3062 dev = &rte_eth_devices[port_id];
3063
3064 if (num > 0 && set_ptypes == NULL)
3065 return -EINVAL;
3066
3067 if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
3068 *dev->dev_ops->dev_ptypes_set == NULL) {
3069 ret = 0;
3070 goto ptype_unknown;
3071 }
3072
3073 if (ptype_mask == 0) {
3074 ret = (*dev->dev_ops->dev_ptypes_set)(dev,
3075 ptype_mask);
3076 goto ptype_unknown;
3077 }
3078
3079 unused_mask = ptype_mask;
3080 for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
3081 uint32_t mask = ptype_mask & valid_ptype_masks[i];
3082 if (mask && mask != valid_ptype_masks[i]) {
3083 ret = -EINVAL;
3084 goto ptype_unknown;
3085 }
3086 unused_mask &= ~valid_ptype_masks[i];
3087 }
3088
3089 if (unused_mask) {
3090 ret = -EINVAL;
3091 goto ptype_unknown;
3092 }
3093
3094 all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
3095 if (all_ptypes == NULL) {
3096 ret = 0;
3097 goto ptype_unknown;
3098 }
3099
3100 /*
3101 * Accommodate as many set_ptypes as possible. If the supplied
3102 * set_ptypes array is insufficient fill it partially.
3103 */
3104 for (i = 0, j = 0; set_ptypes != NULL &&
3105 (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
3106 if (ptype_mask & all_ptypes[i]) {
3107 if (j < num - 1) {
3108 set_ptypes[j] = all_ptypes[i];
3109 j++;
3110 continue;
3111 }
3112 break;
3113 }
3114 }
3115
3116 if (set_ptypes != NULL && j < num)
3117 set_ptypes[j] = RTE_PTYPE_UNKNOWN;
3118
3119 return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
3120
3121ptype_unknown:
3122 if (num > 0)
3123 set_ptypes[0] = RTE_PTYPE_UNKNOWN;
3124
3125 return ret;
3126}
3127
3128int
3129rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
3130{
3131 struct rte_eth_dev *dev;
3132
3133 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3134 dev = &rte_eth_devices[port_id];
3135 rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
3136
3137 return 0;
3138}
3139
3140int
3141rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
3142{
3143 struct rte_eth_dev *dev;
3144
3145 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3146
3147 dev = &rte_eth_devices[port_id];
3148 *mtu = dev->data->mtu;
3149 return 0;
3150}
3151
3152int
3153rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
3154{
3155 int ret;
3156 struct rte_eth_dev_info dev_info;
3157 struct rte_eth_dev *dev;
3158
3159 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3160 dev = &rte_eth_devices[port_id];
3161 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
3162
3163 /*
3164 * Check if the device supports dev_infos_get, if it does not
3165 * skip min_mtu/max_mtu validation here as this requires values
3166 * that are populated within the call to rte_eth_dev_info_get()
3167 * which relies on dev->dev_ops->dev_infos_get.
3168 */
3169 if (*dev->dev_ops->dev_infos_get != NULL) {
3170 ret = rte_eth_dev_info_get(port_id, &dev_info);
3171 if (ret != 0)
3172 return ret;
3173
3174 if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
3175 return -EINVAL;
3176 }
3177
3178 ret = (*dev->dev_ops->mtu_set)(dev, mtu);
3179 if (!ret)
3180 dev->data->mtu = mtu;
3181
3182 return eth_err(port_id, ret);
3183}
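
/*
 * Example (editor's sketch, not upstream code): raising the MTU and reading
 * back the current value if the driver rejects it (for instance when it
 * falls outside the min_mtu/max_mtu range validated above).  1600 is an
 * arbitrary example value.
 */
static void
example_set_mtu(uint16_t port_id)
{
	uint16_t mtu;

	if (rte_eth_dev_set_mtu(port_id, 1600) != 0 &&
	    rte_eth_dev_get_mtu(port_id, &mtu) == 0)
		RTE_ETHDEV_LOG(INFO, "Port %u keeps MTU %u\n", port_id, mtu);
}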
3184
3185int
3186rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
3187{
3188 struct rte_eth_dev *dev;
3189 int ret;
3190
3191 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3192 dev = &rte_eth_devices[port_id];
3193 if (!(dev->data->dev_conf.rxmode.offloads &
3194 DEV_RX_OFFLOAD_VLAN_FILTER)) {
3195 RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
3196 port_id);
3197 return -ENOSYS;
3198 }
3199
3200 if (vlan_id > 4095) {
3201 RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
3202 port_id, vlan_id);
3203 return -EINVAL;
3204 }
3205 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
3206
3207 ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
3208 if (ret == 0) {
3209 struct rte_vlan_filter_conf *vfc;
3210 int vidx;
3211 int vbit;
3212
3213 vfc = &dev->data->vlan_filter_conf;
3214 vidx = vlan_id / 64;
3215 vbit = vlan_id % 64;
3216
3217 if (on)
3218 vfc->ids[vidx] |= UINT64_C(1) << vbit;
3219 else
3220 vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
3221 }
3222
3223 return eth_err(port_id, ret);
3224}
3225
3226int
3227rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
3228 int on)
3229{
3230 struct rte_eth_dev *dev;
3231
3232 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3233 dev = &rte_eth_devices[port_id];
3234 if (rx_queue_id >= dev->data->nb_rx_queues) {
3235 RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
3236 return -EINVAL;
3237 }
3238
3239 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
3240 (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
3241
3242 return 0;
3243}
3244
3245int
3246rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
3247 enum rte_vlan_type vlan_type,
3248 uint16_t tpid)
3249{
3250 struct rte_eth_dev *dev;
3251
3252 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3253 dev = &rte_eth_devices[port_id];
3254 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
3255
3256 return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
3257 tpid));
3258}
3259
3260int
3261rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
3262{
3263 struct rte_eth_dev *dev;
3264 int ret = 0;
3265 int mask = 0;
3266 int cur, org = 0;
3267 uint64_t orig_offloads;
3268 uint64_t dev_offloads;
3269
3270 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3271 dev = &rte_eth_devices[port_id];
3272
3273 /* save original values in case of failure */
3274 orig_offloads = dev->data->dev_conf.rxmode.offloads;
3275 dev_offloads = orig_offloads;
3276
3277 /* check which option changed by application */
3278 cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
3279 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
3280 if (cur != org) {
3281 if (cur)
3282 dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
3283 else
3284 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
3285 mask |= ETH_VLAN_STRIP_MASK;
3286 }
3287
3288 cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
3289 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
3290 if (cur != org) {
3291 if (cur)
3292 dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
3293 else
3294 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
3295 mask |= ETH_VLAN_FILTER_MASK;
3296 }
3297
3298 cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
3299 org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
3300 if (cur != org) {
3301 if (cur)
3302 dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
3303 else
3304 dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
3305 mask |= ETH_VLAN_EXTEND_MASK;
3306 }
3307
3308 cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
3309 org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
3310 if (cur != org) {
3311 if (cur)
3312 dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
3313 else
3314 dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
3315 mask |= ETH_QINQ_STRIP_MASK;
3316 }
3317
3318 /*no change*/
3319 if (mask == 0)
3320 return ret;
3321
3322 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
3323 dev->data->dev_conf.rxmode.offloads = dev_offloads;
3324 ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
3325 if (ret) {
3326 /* hit an error restore original values */
3327 dev->data->dev_conf.rxmode.offloads = orig_offloads;
3328 }
3329
3330 return eth_err(port_id, ret);
3331}
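
/*
 * Example (editor's sketch, not upstream code): enabling VLAN stripping and
 * filtering at runtime through the mask-based API above, then admitting one
 * VLAN id.  The id is hypothetical; the filter call only succeeds because
 * set_vlan_offload() has just recorded DEV_RX_OFFLOAD_VLAN_FILTER in the
 * port configuration.
 */
static int
example_enable_vlan(uint16_t port_id, uint16_t vid)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);
	int ret;

	if (mask < 0)
		return mask;

	mask |= ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
	ret = rte_eth_dev_set_vlan_offload(port_id, mask);
	if (ret != 0)
		return ret;

	return rte_eth_dev_vlan_filter(port_id, vid, 1);
}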
3332
3333int
3334rte_eth_dev_get_vlan_offload(uint16_t port_id)
3335{
3336 struct rte_eth_dev *dev;
3337 uint64_t *dev_offloads;
3338 int ret = 0;
3339
3340 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3341 dev = &rte_eth_devices[port_id];
3342 dev_offloads = &dev->data->dev_conf.rxmode.offloads;
3343
3344 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
3345 ret |= ETH_VLAN_STRIP_OFFLOAD;
3346
3347 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
3348 ret |= ETH_VLAN_FILTER_OFFLOAD;
3349
3350 if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
3351 ret |= ETH_VLAN_EXTEND_OFFLOAD;
3352
3353 if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
3354 ret |= ETH_QINQ_STRIP_OFFLOAD;
3355
3356 return ret;
3357}
3358
3359int
3360rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
3361{
3362 struct rte_eth_dev *dev;
3363
3364 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3365 dev = &rte_eth_devices[port_id];
3366 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
3367
3368 return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
3369}
3370
3371int
3372rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3373{
3374 struct rte_eth_dev *dev;
3375
3376 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3377 dev = &rte_eth_devices[port_id];
3378 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
3379 memset(fc_conf, 0, sizeof(*fc_conf));
3380 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
3381}
3382
3383int
3384rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
3385{
3386 struct rte_eth_dev *dev;
3387
3388 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3389 if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
3390 RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
3391 return -EINVAL;
3392 }
3393
3394 dev = &rte_eth_devices[port_id];
3395 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
3396 return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
3397}
3398
3399int
3400rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
3401 struct rte_eth_pfc_conf *pfc_conf)
3402{
3403 struct rte_eth_dev *dev;
3404
3405 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3406 if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
3407 RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
3408 return -EINVAL;
3409 }
3410
3411 dev = &rte_eth_devices[port_id];
3412 /* High water, low water validation are device specific */
3413 if (*dev->dev_ops->priority_flow_ctrl_set)
3414 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
3415 (dev, pfc_conf));
3416 return -ENOTSUP;
3417}
3418
3419static int
3420rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
3421 uint16_t reta_size)
3422{
3423 uint16_t i, num;
3424
3425 if (!reta_conf)
3426 return -EINVAL;
3427
3428 num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
3429 for (i = 0; i < num; i++) {
3430 if (reta_conf[i].mask)
3431 return 0;
3432 }
3433
3434 return -EINVAL;
3435}
3436
3437static int
3438rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
3439 uint16_t reta_size,
3440 uint16_t max_rxq)
3441{
3442 uint16_t i, idx, shift;
3443
3444 if (!reta_conf)
3445 return -EINVAL;
3446
3447 if (max_rxq == 0) {
3448 RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
3449 return -EINVAL;
3450 }
3451
3452 for (i = 0; i < reta_size; i++) {
3453 idx = i / RTE_RETA_GROUP_SIZE;
3454 shift = i % RTE_RETA_GROUP_SIZE;
3455 if ((reta_conf[idx].mask & (1ULL << shift)) &&
3456 (reta_conf[idx].reta[shift] >= max_rxq)) {
3457 RTE_ETHDEV_LOG(ERR,
3458 "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
3459 idx, shift,
3460 reta_conf[idx].reta[shift], max_rxq);
3461 return -EINVAL;
3462 }
3463 }
3464
3465 return 0;
3466}
3467
3468int
3469rte_eth_dev_rss_reta_update(uint16_t port_id,
3470 struct rte_eth_rss_reta_entry64 *reta_conf,
3471 uint16_t reta_size)
3472{
3473 struct rte_eth_dev *dev;
3474 int ret;
3475
3476 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3477 /* Check mask bits */
3478 ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3479 if (ret < 0)
3480 return ret;
3481
3482 dev = &rte_eth_devices[port_id];
3483
3484 /* Check entry value */
3485 ret = rte_eth_check_reta_entry(reta_conf, reta_size,
3486 dev->data->nb_rx_queues);
3487 if (ret < 0)
3488 return ret;
3489
3490 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
3491 return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
3492 reta_size));
3493}
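
/*
 * Example (editor's sketch, not upstream code): spreading the whole RSS
 * redirection table evenly over nb_rx_queues.  reta_size is taken from
 * rte_eth_dev_info_get() and entries are grouped in blocks of
 * RTE_RETA_GROUP_SIZE (64), matching the mask/entry checks above.  The
 * fixed 8-group array caps this sketch at a 512-entry table.
 */
static int
example_fill_reta(uint16_t port_id, uint16_t nb_rx_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	struct rte_eth_dev_info dev_info;
	uint16_t i;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if (nb_rx_queues == 0 || dev_info.reta_size == 0 ||
	    dev_info.reta_size > RTE_DIM(reta_conf) * RTE_RETA_GROUP_SIZE)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
			UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % nb_rx_queues;
	}

	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
			dev_info.reta_size);
}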
3494
3495int
3496rte_eth_dev_rss_reta_query(uint16_t port_id,
3497 struct rte_eth_rss_reta_entry64 *reta_conf,
3498 uint16_t reta_size)
3499{
3500 struct rte_eth_dev *dev;
3501 int ret;
3502
3503 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3504
3505 /* Check mask bits */
3506 ret = rte_eth_check_reta_mask(reta_conf, reta_size);
3507 if (ret < 0)
3508 return ret;
3509
3510 dev = &rte_eth_devices[port_id];
3511 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
3512 return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
3513 reta_size));
3514}
3515
3516int
3517rte_eth_dev_rss_hash_update(uint16_t port_id,
3518 struct rte_eth_rss_conf *rss_conf)
3519{
3520 struct rte_eth_dev *dev;
3521 struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
3522 int ret;
3523
3524 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3525
3526 ret = rte_eth_dev_info_get(port_id, &dev_info);
3527 if (ret != 0)
3528 return ret;
3529
3530 rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
3531
3532 dev = &rte_eth_devices[port_id];
3533 if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
3534 dev_info.flow_type_rss_offloads) {
3535 RTE_ETHDEV_LOG(ERR,
3536 "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
3537 port_id, rss_conf->rss_hf,
3538 dev_info.flow_type_rss_offloads);
3539 return -EINVAL;
3540 }
3541 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
3542 return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
3543 rss_conf));
3544}
3545
3546int
3547rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
3548 struct rte_eth_rss_conf *rss_conf)
3549{
3550 struct rte_eth_dev *dev;
3551
3552 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3553 dev = &rte_eth_devices[port_id];
3554 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
3555 return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
3556 rss_conf));
3557}
3558
3559int
3560rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
3561 struct rte_eth_udp_tunnel *udp_tunnel)
3562{
3563 struct rte_eth_dev *dev;
3564
3565 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3566 if (udp_tunnel == NULL) {
3567 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3568 return -EINVAL;
3569 }
3570
3571 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3572 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3573 return -EINVAL;
3574 }
3575
3576 dev = &rte_eth_devices[port_id];
3577 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
3578 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
3579 udp_tunnel));
3580}
3581
3582int
3583rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
3584 struct rte_eth_udp_tunnel *udp_tunnel)
3585{
3586 struct rte_eth_dev *dev;
3587
3588 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3589 dev = &rte_eth_devices[port_id];
3590
3591 if (udp_tunnel == NULL) {
3592 RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
3593 return -EINVAL;
3594 }
3595
3596 if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
3597 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
3598 return -EINVAL;
3599 }
3600
3601 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
3602 return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
3603 udp_tunnel));
3604}
3605
3606int
3607rte_eth_led_on(uint16_t port_id)
3608{
3609 struct rte_eth_dev *dev;
3610
3611 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3612 dev = &rte_eth_devices[port_id];
3613 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
3614 return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
3615}
3616
3617int
3618rte_eth_led_off(uint16_t port_id)
3619{
3620 struct rte_eth_dev *dev;
3621
3622 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3623 dev = &rte_eth_devices[port_id];
3624 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
3625 return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
3626}
3627
3628/*
3629 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3630 * an empty spot.
3631 */
3632static int
3633get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3634{
3635 struct rte_eth_dev_info dev_info;
3636 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3637 unsigned i;
3638 int ret;
3639
3640 ret = rte_eth_dev_info_get(port_id, &dev_info);
3641 if (ret != 0)
3642 return -1;
3643
3644 for (i = 0; i < dev_info.max_mac_addrs; i++)
3645 if (memcmp(addr, &dev->data->mac_addrs[i],
3646 RTE_ETHER_ADDR_LEN) == 0)
3647 return i;
3648
3649 return -1;
3650}
3651
3652static const struct rte_ether_addr null_mac_addr;
3653
3654int
3655rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
3656 uint32_t pool)
3657{
3658 struct rte_eth_dev *dev;
3659 int index;
3660 uint64_t pool_mask;
3661 int ret;
3662
3663 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3664 dev = &rte_eth_devices[port_id];
3665 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
3666
3667 if (rte_is_zero_ether_addr(addr)) {
3668 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3669 port_id);
3670 return -EINVAL;
3671 }
3672 if (pool >= ETH_64_POOLS) {
3673 RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
3674 return -EINVAL;
3675 }
3676
3677 index = get_mac_addr_index(port_id, addr);
3678 if (index < 0) {
3679 index = get_mac_addr_index(port_id, &null_mac_addr);
3680 if (index < 0) {
3681 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3682 port_id);
3683 return -ENOSPC;
3684 }
3685 } else {
3686 pool_mask = dev->data->mac_pool_sel[index];
3687
3688 /* Check if both MAC address and pool is already there, and do nothing */
3689 if (pool_mask & (1ULL << pool))
3690 return 0;
3691 }
3692
3693 /* Update NIC */
3694 ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
3695
3696 if (ret == 0) {
3697 /* Update address in NIC data structure */
3698 rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
3699
3700 /* Update pool bitmap in NIC data structure */
3701 dev->data->mac_pool_sel[index] |= (1ULL << pool);
3702 }
3703
3704 return eth_err(port_id, ret);
3705}
3706
3707int
3708rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
3709{
3710 struct rte_eth_dev *dev;
3711 int index;
3712
3713 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3714 dev = &rte_eth_devices[port_id];
3715 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
3716
3717 index = get_mac_addr_index(port_id, addr);
3718 if (index == 0) {
3719 RTE_ETHDEV_LOG(ERR,
3720 "Port %u: Cannot remove default MAC address\n",
3721 port_id);
3722 return -EADDRINUSE;
3723 } else if (index < 0)
3724 return 0; /* Do nothing if address wasn't found */
3725
3726 /* Update NIC */
3727 (*dev->dev_ops->mac_addr_remove)(dev, index);
3728
3729 /* Update address in NIC data structure */
3730 rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
3731
3732 /* reset pool bitmap */
3733 dev->data->mac_pool_sel[index] = 0;
3734
3735 return 0;
3736}
3737
3738int
3739rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
3740{
3741 struct rte_eth_dev *dev;
3742 int ret;
3743
3744 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3745
3746 if (!rte_is_valid_assigned_ether_addr(addr))
3747 return -EINVAL;
3748
3749 dev = &rte_eth_devices[port_id];
3750 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3751
3752 ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
3753 if (ret < 0)
3754 return ret;
3755
3756 /* Update default address in NIC data structure */
f67539c2 3757 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3758
3759 return 0;
3760}
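/*
 * Editor's sketch (not part of the upstream file): how an application is
 * expected to combine the three MAC filtering calls above. The locally
 * administered address 02:00:00:00:00:01 and the use of pool 0 are arbitrary
 * example values.
 *
 *	struct rte_ether_addr extra = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) == 0) {
 *		... frames to 02:00:00:00:00:01 are now accepted ...
 *		rte_eth_dev_mac_addr_remove(port_id, &extra);
 *	}
 *
 *	rte_eth_dev_default_mac_addr_set(port_id, &extra);
 *
 * Adding the same address/pool pair twice is a no-op (see the pool_mask check
 * above), and the default address at index 0 can only be replaced through
 * rte_eth_dev_default_mac_addr_set(), never removed.
 */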
3761
3762
3763/*
3764 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3765 * an empty spot.
3766 */
3767static int
f67539c2 3768get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
3769{
3770 struct rte_eth_dev_info dev_info;
3771 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3772 unsigned i;
3773 int ret;
3774
3775 ret = rte_eth_dev_info_get(port_id, &dev_info);
3776 if (ret != 0)
3777 return -1;
11fdf7f2 3778
3779 if (!dev->data->hash_mac_addrs)
3780 return -1;
3781
3782 for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3783 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
f67539c2 3784 RTE_ETHER_ADDR_LEN) == 0)
3785 return i;
3786
3787 return -1;
3788}
3789
3790int
f67539c2 3791rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
3792 uint8_t on)
3793{
3794 int index;
3795 int ret;
3796 struct rte_eth_dev *dev;
3797
3798 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3799
3800 dev = &rte_eth_devices[port_id];
f67539c2 3801 if (rte_is_zero_ether_addr(addr)) {
3802 RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
3803 port_id);
3804 return -EINVAL;
3805 }
3806
3807 index = get_hash_mac_addr_index(port_id, addr);
3808 /* Check if it's already there, and do nothing */
3809 if ((index >= 0) && on)
3810 return 0;
3811
3812 if (index < 0) {
3813 if (!on) {
3814 RTE_ETHDEV_LOG(ERR,
3815 "Port %u: the MAC address was not set in UTA\n",
3816 port_id);
3817 return -EINVAL;
3818 }
3819
3820 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3821 if (index < 0) {
3822 RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
3823 port_id);
3824 return -ENOSPC;
3825 }
3826 }
3827
3828 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3829 ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3830 if (ret == 0) {
3831 /* Update address in NIC data structure */
3832 if (on)
f67539c2 3833 rte_ether_addr_copy(addr,
3834 &dev->data->hash_mac_addrs[index]);
3835 else
f67539c2 3836 rte_ether_addr_copy(&null_mac_addr,
3837 &dev->data->hash_mac_addrs[index]);
3838 }
3839
3840 return eth_err(port_id, ret);
3841}
3842
3843int
3844rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3845{
3846 struct rte_eth_dev *dev;
3847
3848 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3849
3850 dev = &rte_eth_devices[port_id];
3851
3852 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3853 return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3854 on));
3855}
3856
3857int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3858 uint16_t tx_rate)
3859{
3860 struct rte_eth_dev *dev;
3861 struct rte_eth_dev_info dev_info;
3862 struct rte_eth_link link;
f67539c2 3863 int ret;
3864
3865 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3866
3867 ret = rte_eth_dev_info_get(port_id, &dev_info);
3868 if (ret != 0)
3869 return ret;
3870
11fdf7f2 3871 dev = &rte_eth_devices[port_id];
3872 link = dev->data->dev_link;
3873
3874 if (queue_idx > dev_info.max_tx_queues) {
3875 RTE_ETHDEV_LOG(ERR,
3876			"Set queue rate limit: port %u: invalid queue id=%u\n",
3877 port_id, queue_idx);
3878 return -EINVAL;
3879 }
3880
3881 if (tx_rate > link.link_speed) {
3882 RTE_ETHDEV_LOG(ERR,
3883			"Set queue rate limit: invalid tx_rate=%u, greater than link speed %u\n",
3884 tx_rate, link.link_speed);
3885 return -EINVAL;
3886 }
3887
3888 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3889 return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3890 queue_idx, tx_rate));
3891}
3892
3893int
3894rte_eth_mirror_rule_set(uint16_t port_id,
3895 struct rte_eth_mirror_conf *mirror_conf,
3896 uint8_t rule_id, uint8_t on)
3897{
3898 struct rte_eth_dev *dev;
3899
3900 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3901 if (mirror_conf->rule_type == 0) {
3902 RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
3903 return -EINVAL;
3904 }
3905
3906 if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3907 RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
3908 ETH_64_POOLS - 1);
3909 return -EINVAL;
3910 }
3911
3912 if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3913 ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3914 (mirror_conf->pool_mask == 0)) {
3915 RTE_ETHDEV_LOG(ERR,
3916 "Invalid mirror pool, pool mask can not be 0\n");
3917 return -EINVAL;
3918 }
3919
3920 if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3921 mirror_conf->vlan.vlan_mask == 0) {
3922 RTE_ETHDEV_LOG(ERR,
3923 "Invalid vlan mask, vlan mask can not be 0\n");
3924 return -EINVAL;
3925 }
3926
3927 dev = &rte_eth_devices[port_id];
3928 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3929
3930 return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3931 mirror_conf, rule_id, on));
3932}
3933
3934int
3935rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3936{
3937 struct rte_eth_dev *dev;
3938
3939 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3940
3941 dev = &rte_eth_devices[port_id];
3942 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3943
3944 return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3945 rule_id));
3946}
3947
3948RTE_INIT(eth_dev_init_cb_lists)
3949{
3950 int i;
3951
3952 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3953 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3954}
3955
3956int
3957rte_eth_dev_callback_register(uint16_t port_id,
3958 enum rte_eth_event_type event,
3959 rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3960{
3961 struct rte_eth_dev *dev;
3962 struct rte_eth_dev_callback *user_cb;
3963 uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3964 uint16_t last_port;
3965
3966 if (!cb_fn)
3967 return -EINVAL;
3968
3969 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3970 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
3971 return -EINVAL;
3972 }
3973
3974 if (port_id == RTE_ETH_ALL) {
3975 next_port = 0;
3976 last_port = RTE_MAX_ETHPORTS - 1;
3977 } else {
3978 next_port = last_port = port_id;
3979 }
3980
3981 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3982
3983 do {
3984 dev = &rte_eth_devices[next_port];
3985
3986 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3987 if (user_cb->cb_fn == cb_fn &&
3988 user_cb->cb_arg == cb_arg &&
3989 user_cb->event == event) {
3990 break;
3991 }
3992 }
3993
3994		/* No existing callback matched; create a new one. */
3995 if (user_cb == NULL) {
3996 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3997 sizeof(struct rte_eth_dev_callback), 0);
3998 if (user_cb != NULL) {
3999 user_cb->cb_fn = cb_fn;
4000 user_cb->cb_arg = cb_arg;
4001 user_cb->event = event;
4002 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
4003 user_cb, next);
4004 } else {
4005 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4006 rte_eth_dev_callback_unregister(port_id, event,
4007 cb_fn, cb_arg);
4008 return -ENOMEM;
4009 }
4010
4011 }
4012 } while (++next_port <= last_port);
4013
4014 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4015 return 0;
4016}
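/*
 * Editor's sketch (not part of the upstream file): registering a link-status
 * callback with the function above. "lsc_event_cb" is a hypothetical
 * application function; passing RTE_ETH_ALL registers it on every valid port.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: link state change event %d\n",
 *		       port_id, (int)event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 *
 * The callback is invoked from the interrupt host thread, so it should not
 * block; unregister it with rte_eth_dev_callback_unregister() before the port
 * is closed.
 */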
4017
4018int
4019rte_eth_dev_callback_unregister(uint16_t port_id,
4020 enum rte_eth_event_type event,
4021 rte_eth_dev_cb_fn cb_fn, void *cb_arg)
4022{
4023 int ret;
4024 struct rte_eth_dev *dev;
4025 struct rte_eth_dev_callback *cb, *next;
4026 uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
4027 uint16_t last_port;
4028
4029 if (!cb_fn)
4030 return -EINVAL;
4031
4032 if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
4033 RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
4034 return -EINVAL;
4035 }
4036
4037 if (port_id == RTE_ETH_ALL) {
4038 next_port = 0;
4039 last_port = RTE_MAX_ETHPORTS - 1;
4040 } else {
4041 next_port = last_port = port_id;
4042 }
4043
4044 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4045
4046 do {
4047 dev = &rte_eth_devices[next_port];
4048 ret = 0;
4049 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
4050 cb = next) {
4051
4052 next = TAILQ_NEXT(cb, next);
4053
4054 if (cb->cb_fn != cb_fn || cb->event != event ||
f67539c2 4055 (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
4056 continue;
4057
4058 /*
4059 * if this callback is not executing right now,
4060 * then remove it.
4061 */
4062 if (cb->active == 0) {
4063 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
4064 rte_free(cb);
4065 } else {
4066 ret = -EAGAIN;
4067 }
4068 }
4069 } while (++next_port <= last_port);
4070
4071 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4072 return ret;
4073}
4074
4075int
4076_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
4077 enum rte_eth_event_type event, void *ret_param)
4078{
4079 struct rte_eth_dev_callback *cb_lst;
4080 struct rte_eth_dev_callback dev_cb;
4081 int rc = 0;
4082
4083 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4084 TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
4085 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
4086 continue;
4087 dev_cb = *cb_lst;
4088 cb_lst->active = 1;
4089 if (ret_param != NULL)
4090 dev_cb.ret_param = ret_param;
4091
4092 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4093 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
4094 dev_cb.cb_arg, dev_cb.ret_param);
4095 rte_spinlock_lock(&rte_eth_dev_cb_lock);
4096 cb_lst->active = 0;
4097 }
4098 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
4099 return rc;
4100}
4101
4102void
4103rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
4104{
4105 if (dev == NULL)
4106 return;
4107
4108 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
4109
4110 dev->state = RTE_ETH_DEV_ATTACHED;
4111}
4112
4113int
4114rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
4115{
4116 uint32_t vec;
4117 struct rte_eth_dev *dev;
4118 struct rte_intr_handle *intr_handle;
4119 uint16_t qid;
4120 int rc;
4121
4122 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4123
4124 dev = &rte_eth_devices[port_id];
4125
4126 if (!dev->intr_handle) {
4127 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4128 return -ENOTSUP;
4129 }
4130
4131 intr_handle = dev->intr_handle;
4132 if (!intr_handle->intr_vec) {
4133 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4134 return -EPERM;
4135 }
4136
4137 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
4138 vec = intr_handle->intr_vec[qid];
4139 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4140 if (rc && rc != -EEXIST) {
4141 RTE_ETHDEV_LOG(ERR,
4142 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4143 port_id, qid, op, epfd, vec);
4144 }
4145 }
4146
4147 return 0;
4148}
4149
f67539c2 4150int
4151rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
4152{
4153 struct rte_intr_handle *intr_handle;
4154 struct rte_eth_dev *dev;
4155 unsigned int efd_idx;
4156 uint32_t vec;
4157 int fd;
4158
4159 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
4160
4161 dev = &rte_eth_devices[port_id];
4162
4163 if (queue_id >= dev->data->nb_rx_queues) {
4164 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4165 return -1;
4166 }
4167
4168 if (!dev->intr_handle) {
4169 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4170 return -1;
4171 }
4172
4173 intr_handle = dev->intr_handle;
4174 if (!intr_handle->intr_vec) {
4175 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4176 return -1;
4177 }
4178
4179 vec = intr_handle->intr_vec[queue_id];
4180 efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
4181 (vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
4182 fd = intr_handle->efds[efd_idx];
4183
4184 return fd;
4185}
4186
4187const struct rte_memzone *
4188rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
4189 uint16_t queue_id, size_t size, unsigned align,
4190 int socket_id)
4191{
4192 char z_name[RTE_MEMZONE_NAMESIZE];
4193 const struct rte_memzone *mz;
9f95a23c 4194 int rc;
11fdf7f2 4195
4196 rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
4197 dev->data->port_id, queue_id, ring_name);
4198 if (rc >= RTE_MEMZONE_NAMESIZE) {
4199 RTE_ETHDEV_LOG(ERR, "ring name too long\n");
4200 rte_errno = ENAMETOOLONG;
4201 return NULL;
4202 }
4203
4204 mz = rte_memzone_lookup(z_name);
4205 if (mz)
4206 return mz;
4207
4208 return rte_memzone_reserve_aligned(z_name, size, socket_id,
4209 RTE_MEMZONE_IOVA_CONTIG, align);
4210}
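/*
 * Editor's sketch (not part of the upstream file): the helper above is meant
 * for PMDs setting up descriptor rings. A driver's rx_queue_setup() would
 * typically call it along these lines ("ring_size" and "socket_id" are
 * hypothetical local variables):
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, ring_size,
 *				      RTE_CACHE_LINE_SIZE, socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 *
 * Because the zone name encodes the port id, queue id and ring name, a second
 * call with the same identifiers returns the already reserved memzone instead
 * of allocating a new one.
 */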
4211
f67539c2 4212int
4213rte_eth_dev_create(struct rte_device *device, const char *name,
4214 size_t priv_data_size,
4215 ethdev_bus_specific_init ethdev_bus_specific_init,
4216 void *bus_init_params,
4217 ethdev_init_t ethdev_init, void *init_params)
4218{
4219 struct rte_eth_dev *ethdev;
4220 int retval;
4221
4222 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
4223
4224 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
4225 ethdev = rte_eth_dev_allocate(name);
4226 if (!ethdev)
4227 return -ENODEV;
4228
4229 if (priv_data_size) {
4230 ethdev->data->dev_private = rte_zmalloc_socket(
4231 name, priv_data_size, RTE_CACHE_LINE_SIZE,
4232 device->numa_node);
4233
4234 if (!ethdev->data->dev_private) {
4235				RTE_LOG(ERR, EAL, "failed to allocate private data\n");
4236 retval = -ENOMEM;
4237 goto probe_failed;
4238 }
4239 }
4240 } else {
4241 ethdev = rte_eth_dev_attach_secondary(name);
4242 if (!ethdev) {
4243 RTE_LOG(ERR, EAL, "secondary process attach failed, "
4244				"ethdev doesn't exist\n");
9f95a23c 4245 return -ENODEV;
4246 }
4247 }
4248
4249 ethdev->device = device;
4250
4251 if (ethdev_bus_specific_init) {
4252 retval = ethdev_bus_specific_init(ethdev, bus_init_params);
4253 if (retval) {
4254 RTE_LOG(ERR, EAL,
4255				"ethdev bus specific initialisation failed\n");
4256 goto probe_failed;
4257 }
4258 }
4259
4260 retval = ethdev_init(ethdev, init_params);
4261 if (retval) {
4262		RTE_LOG(ERR, EAL, "ethdev initialisation failed\n");
4263 goto probe_failed;
4264 }
4265
4266 rte_eth_dev_probing_finish(ethdev);
4267
4268 return retval;
11fdf7f2 4269
9f95a23c 4270probe_failed:
11fdf7f2 4271 rte_eth_dev_release_port(ethdev);
4272 return retval;
4273}
4274
f67539c2 4275int
4276rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
4277 ethdev_uninit_t ethdev_uninit)
4278{
4279 int ret;
4280
4281 ethdev = rte_eth_dev_allocated(ethdev->data->name);
4282 if (!ethdev)
4283 return -ENODEV;
4284
4285 RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
11fdf7f2 4286
4287 ret = ethdev_uninit(ethdev);
4288 if (ret)
4289 return ret;
4290
4291 return rte_eth_dev_release_port(ethdev);
4292}
4293
4294int
4295rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
4296 int epfd, int op, void *data)
4297{
4298 uint32_t vec;
4299 struct rte_eth_dev *dev;
4300 struct rte_intr_handle *intr_handle;
4301 int rc;
4302
4303 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4304
4305 dev = &rte_eth_devices[port_id];
4306 if (queue_id >= dev->data->nb_rx_queues) {
4307 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4308 return -EINVAL;
4309 }
4310
4311 if (!dev->intr_handle) {
4312 RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
4313 return -ENOTSUP;
4314 }
4315
4316 intr_handle = dev->intr_handle;
4317 if (!intr_handle->intr_vec) {
4318 RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
4319 return -EPERM;
4320 }
4321
4322 vec = intr_handle->intr_vec[queue_id];
4323 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
4324 if (rc && rc != -EEXIST) {
4325 RTE_ETHDEV_LOG(ERR,
4326 "p %u q %u rx ctl error op %d epfd %d vec %u\n",
4327 port_id, queue_id, op, epfd, vec);
4328 return rc;
4329 }
4330
4331 return 0;
4332}
4333
4334int
4335rte_eth_dev_rx_intr_enable(uint16_t port_id,
4336 uint16_t queue_id)
4337{
4338 struct rte_eth_dev *dev;
4339
4340 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4341
4342 dev = &rte_eth_devices[port_id];
4343
4344 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
4345 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
4346 queue_id));
4347}
4348
4349int
4350rte_eth_dev_rx_intr_disable(uint16_t port_id,
4351 uint16_t queue_id)
4352{
4353 struct rte_eth_dev *dev;
4354
4355 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4356
4357 dev = &rte_eth_devices[port_id];
4358
4359 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
4360 return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
4361 queue_id));
4362}
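/*
 * Editor's sketch (not part of the upstream file): the usual Rx-interrupt
 * flow built on the two control calls above plus the epoll helpers from
 * rte_interrupts.h. Error handling is omitted.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 *	... poll the queue with rte_eth_rx_burst() until it is empty ...
 *
 * Disabling the interrupt before polling avoids a wakeup storm while the
 * application drains the queue.
 */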
4363
4364
4365int
4366rte_eth_dev_filter_supported(uint16_t port_id,
4367 enum rte_filter_type filter_type)
4368{
4369 struct rte_eth_dev *dev;
4370
4371 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4372
4373 dev = &rte_eth_devices[port_id];
4374 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4375 return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4376 RTE_ETH_FILTER_NOP, NULL);
4377}
4378
4379int
4380rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
4381 enum rte_filter_op filter_op, void *arg)
4382{
4383 struct rte_eth_dev *dev;
4384
4385 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4386
4387 dev = &rte_eth_devices[port_id];
4388 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
4389 return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
4390 filter_op, arg));
4391}
4392
4393const struct rte_eth_rxtx_callback *
4394rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
4395 rte_rx_callback_fn fn, void *user_param)
4396{
4397#ifndef RTE_ETHDEV_RXTX_CALLBACKS
4398 rte_errno = ENOTSUP;
4399 return NULL;
4400#endif
4401 struct rte_eth_dev *dev;
4402
4403 /* check input parameters */
4404 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4405 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4406 rte_errno = EINVAL;
4407 return NULL;
4408 }
4409 dev = &rte_eth_devices[port_id];
4410 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4411 rte_errno = EINVAL;
4412 return NULL;
4413 }
4414 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4415
4416 if (cb == NULL) {
4417 rte_errno = ENOMEM;
4418 return NULL;
4419 }
4420
4421 cb->fn.rx = fn;
4422 cb->param = user_param;
4423
4424 rte_spinlock_lock(&rte_eth_rx_cb_lock);
4425 /* Add the callbacks in fifo order. */
4426 struct rte_eth_rxtx_callback *tail =
4427 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4428
4429 if (!tail) {
4430 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4431
4432 } else {
4433 while (tail->next)
4434 tail = tail->next;
4435 tail->next = cb;
4436 }
4437 rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4438
4439 return cb;
4440}
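/*
 * Editor's sketch (not part of the upstream file): a minimal Rx callback and
 * its registration with the function above. "count_rx_cb" and "rx_count" are
 * hypothetical application names.
 *
 *	static uint64_t rx_count;
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
 *		    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *ctr = user_param;
 *
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*ctr += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	const struct rte_eth_rxtx_callback *cb =
 *		rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rx_count);
 *
 * The callback executes inside rte_eth_rx_burst() on the polling lcore; after
 * rte_eth_remove_rx_callback() the memory behind "cb" may only be freed once
 * no datapath thread can still be running it.
 */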
4441
4442const struct rte_eth_rxtx_callback *
4443rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
4444 rte_rx_callback_fn fn, void *user_param)
4445{
4446#ifndef RTE_ETHDEV_RXTX_CALLBACKS
4447 rte_errno = ENOTSUP;
4448 return NULL;
4449#endif
4450 /* check input parameters */
4451 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4452 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
4453 rte_errno = EINVAL;
4454 return NULL;
4455 }
4456
4457 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4458
4459 if (cb == NULL) {
4460 rte_errno = ENOMEM;
4461 return NULL;
4462 }
4463
4464 cb->fn.rx = fn;
4465 cb->param = user_param;
4466
4467 rte_spinlock_lock(&rte_eth_rx_cb_lock);
f67539c2	4468	/* Add the callback at the head of the list */
4469 cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
4470 rte_smp_wmb();
4471 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
4472 rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4473
4474 return cb;
4475}
4476
4477const struct rte_eth_rxtx_callback *
4478rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
4479 rte_tx_callback_fn fn, void *user_param)
4480{
4481#ifndef RTE_ETHDEV_RXTX_CALLBACKS
4482 rte_errno = ENOTSUP;
4483 return NULL;
4484#endif
4485 struct rte_eth_dev *dev;
4486
4487 /* check input parameters */
4488 if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
4489 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
4490 rte_errno = EINVAL;
4491 return NULL;
4492 }
4493
4494 dev = &rte_eth_devices[port_id];
4495 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4496 rte_errno = EINVAL;
4497 return NULL;
4498 }
4499
4500 struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
4501
4502 if (cb == NULL) {
4503 rte_errno = ENOMEM;
4504 return NULL;
4505 }
4506
4507 cb->fn.tx = fn;
4508 cb->param = user_param;
4509
4510 rte_spinlock_lock(&rte_eth_tx_cb_lock);
4511 /* Add the callbacks in fifo order. */
4512 struct rte_eth_rxtx_callback *tail =
4513 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
4514
4515 if (!tail) {
4516 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
4517
4518 } else {
4519 while (tail->next)
4520 tail = tail->next;
4521 tail->next = cb;
4522 }
4523 rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4524
4525 return cb;
4526}
4527
4528int
4529rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
4530 const struct rte_eth_rxtx_callback *user_cb)
4531{
4532#ifndef RTE_ETHDEV_RXTX_CALLBACKS
4533 return -ENOTSUP;
4534#endif
4535 /* Check input parameters. */
4536 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4537 if (user_cb == NULL ||
4538 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
4539 return -EINVAL;
4540
4541 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4542 struct rte_eth_rxtx_callback *cb;
4543 struct rte_eth_rxtx_callback **prev_cb;
4544 int ret = -EINVAL;
4545
4546 rte_spinlock_lock(&rte_eth_rx_cb_lock);
4547 prev_cb = &dev->post_rx_burst_cbs[queue_id];
4548 for (; *prev_cb != NULL; prev_cb = &cb->next) {
4549 cb = *prev_cb;
4550 if (cb == user_cb) {
4551 /* Remove the user cb from the callback list. */
4552 *prev_cb = cb->next;
4553 ret = 0;
4554 break;
4555 }
4556 }
4557 rte_spinlock_unlock(&rte_eth_rx_cb_lock);
4558
4559 return ret;
4560}
4561
4562int
4563rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
4564 const struct rte_eth_rxtx_callback *user_cb)
4565{
4566#ifndef RTE_ETHDEV_RXTX_CALLBACKS
4567 return -ENOTSUP;
4568#endif
4569 /* Check input parameters. */
4570 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
4571 if (user_cb == NULL ||
4572 queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
4573 return -EINVAL;
4574
4575 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4576 int ret = -EINVAL;
4577 struct rte_eth_rxtx_callback *cb;
4578 struct rte_eth_rxtx_callback **prev_cb;
4579
4580 rte_spinlock_lock(&rte_eth_tx_cb_lock);
4581 prev_cb = &dev->pre_tx_burst_cbs[queue_id];
4582 for (; *prev_cb != NULL; prev_cb = &cb->next) {
4583 cb = *prev_cb;
4584 if (cb == user_cb) {
4585 /* Remove the user cb from the callback list. */
4586 *prev_cb = cb->next;
4587 ret = 0;
4588 break;
4589 }
4590 }
4591 rte_spinlock_unlock(&rte_eth_tx_cb_lock);
4592
4593 return ret;
4594}
4595
4596int
4597rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4598 struct rte_eth_rxq_info *qinfo)
4599{
4600 struct rte_eth_dev *dev;
4601
4602 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4603
4604 if (qinfo == NULL)
4605 return -EINVAL;
4606
4607 dev = &rte_eth_devices[port_id];
4608 if (queue_id >= dev->data->nb_rx_queues) {
4609 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4610 return -EINVAL;
4611 }
4612
4613 if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
4614 RTE_ETHDEV_LOG(INFO,
4615 "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4616 queue_id, port_id);
4617 return -EINVAL;
4618 }
4619
4620 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
4621
4622 memset(qinfo, 0, sizeof(*qinfo));
4623 dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
4624 return 0;
4625}
4626
4627int
4628rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
4629 struct rte_eth_txq_info *qinfo)
4630{
4631 struct rte_eth_dev *dev;
4632
4633 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4634
4635 if (qinfo == NULL)
4636 return -EINVAL;
4637
4638 dev = &rte_eth_devices[port_id];
4639 if (queue_id >= dev->data->nb_tx_queues) {
4640 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4641 return -EINVAL;
4642 }
4643
4644 if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
4645 RTE_ETHDEV_LOG(INFO,
4646 "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
4647 queue_id, port_id);
4648 return -EINVAL;
4649 }
4650
4651 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
4652
4653 memset(qinfo, 0, sizeof(*qinfo));
4654 dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
4655
4656 return 0;
4657}
4658
4659int
4660rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4661 struct rte_eth_burst_mode *mode)
4662{
4663 struct rte_eth_dev *dev;
4664
4665 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4666
4667 if (mode == NULL)
4668 return -EINVAL;
4669
4670 dev = &rte_eth_devices[port_id];
4671
4672 if (queue_id >= dev->data->nb_rx_queues) {
4673 RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
4674 return -EINVAL;
4675 }
4676
4677 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
4678 memset(mode, 0, sizeof(*mode));
4679 return eth_err(port_id,
4680 dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
4681}
4682
4683int
4684rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
4685 struct rte_eth_burst_mode *mode)
4686{
4687 struct rte_eth_dev *dev;
4688
4689 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4690
4691 if (mode == NULL)
4692 return -EINVAL;
4693
4694 dev = &rte_eth_devices[port_id];
4695
4696 if (queue_id >= dev->data->nb_tx_queues) {
4697 RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
4698 return -EINVAL;
4699 }
4700
4701 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
4702 memset(mode, 0, sizeof(*mode));
4703 return eth_err(port_id,
4704 dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
4705}
4706
4707int
4708rte_eth_dev_set_mc_addr_list(uint16_t port_id,
f67539c2 4709 struct rte_ether_addr *mc_addr_set,
4710 uint32_t nb_mc_addr)
4711{
4712 struct rte_eth_dev *dev;
4713
4714 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4715
4716 dev = &rte_eth_devices[port_id];
4717 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
4718 return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
4719 mc_addr_set, nb_mc_addr));
4720}
4721
4722int
4723rte_eth_timesync_enable(uint16_t port_id)
4724{
4725 struct rte_eth_dev *dev;
4726
4727 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4728 dev = &rte_eth_devices[port_id];
4729
4730 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
4731 return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
4732}
4733
4734int
4735rte_eth_timesync_disable(uint16_t port_id)
4736{
4737 struct rte_eth_dev *dev;
4738
4739 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4740 dev = &rte_eth_devices[port_id];
4741
4742 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
4743 return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
4744}
4745
4746int
4747rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
4748 uint32_t flags)
4749{
4750 struct rte_eth_dev *dev;
4751
4752 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4753 dev = &rte_eth_devices[port_id];
4754
4755 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
4756 return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
4757 (dev, timestamp, flags));
4758}
4759
4760int
4761rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
4762 struct timespec *timestamp)
4763{
4764 struct rte_eth_dev *dev;
4765
4766 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4767 dev = &rte_eth_devices[port_id];
4768
4769 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
4770 return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
4771 (dev, timestamp));
4772}
4773
4774int
4775rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
4776{
4777 struct rte_eth_dev *dev;
4778
4779 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4780 dev = &rte_eth_devices[port_id];
4781
4782 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
4783 return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
4784 delta));
4785}
4786
4787int
4788rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
4789{
4790 struct rte_eth_dev *dev;
4791
4792 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4793 dev = &rte_eth_devices[port_id];
4794
4795 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
4796 return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
4797 timestamp));
4798}
4799
4800int
4801rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
4802{
4803 struct rte_eth_dev *dev;
4804
4805 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4806 dev = &rte_eth_devices[port_id];
4807
4808 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
4809 return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
4810 timestamp));
4811}
4812
4813int
4814rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
4815{
4816 struct rte_eth_dev *dev;
4817
4818 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4819 dev = &rte_eth_devices[port_id];
4820
4821 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
4822 return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
4823}
4824
4825int
4826rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4827{
4828 struct rte_eth_dev *dev;
4829
4830 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4831
4832 dev = &rte_eth_devices[port_id];
4833 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4834 return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4835}
4836
4837int
4838rte_eth_dev_get_eeprom_length(uint16_t port_id)
4839{
4840 struct rte_eth_dev *dev;
4841
4842 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4843
4844 dev = &rte_eth_devices[port_id];
4845 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4846 return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4847}
4848
4849int
4850rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4851{
4852 struct rte_eth_dev *dev;
4853
4854 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4855
4856 dev = &rte_eth_devices[port_id];
4857 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4858 return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4859}
4860
4861int
4862rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4863{
4864 struct rte_eth_dev *dev;
4865
4866 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4867
4868 dev = &rte_eth_devices[port_id];
4869 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4870 return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4871}
4872
f67539c2 4873int
4874rte_eth_dev_get_module_info(uint16_t port_id,
4875 struct rte_eth_dev_module_info *modinfo)
4876{
4877 struct rte_eth_dev *dev;
4878
4879 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4880
4881 dev = &rte_eth_devices[port_id];
4882 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
4883 return (*dev->dev_ops->get_module_info)(dev, modinfo);
4884}
4885
f67539c2 4886int
4887rte_eth_dev_get_module_eeprom(uint16_t port_id,
4888 struct rte_dev_eeprom_info *info)
4889{
4890 struct rte_eth_dev *dev;
4891
4892 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4893
4894 dev = &rte_eth_devices[port_id];
4895 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
4896 return (*dev->dev_ops->get_module_eeprom)(dev, info);
4897}
4898
4899int
4900rte_eth_dev_get_dcb_info(uint16_t port_id,
4901 struct rte_eth_dcb_info *dcb_info)
4902{
4903 struct rte_eth_dev *dev;
4904
4905 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4906
4907 dev = &rte_eth_devices[port_id];
4908 memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4909
4910 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4911 return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4912}
4913
4914int
4915rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4916 struct rte_eth_l2_tunnel_conf *l2_tunnel)
4917{
4918 struct rte_eth_dev *dev;
4919
4920 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4921 if (l2_tunnel == NULL) {
4922 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4923 return -EINVAL;
4924 }
4925
4926 if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4927 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4928 return -EINVAL;
4929 }
4930
4931 dev = &rte_eth_devices[port_id];
4932 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4933 -ENOTSUP);
4934 return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4935 l2_tunnel));
4936}
4937
4938int
4939rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4940 struct rte_eth_l2_tunnel_conf *l2_tunnel,
4941 uint32_t mask,
4942 uint8_t en)
4943{
4944 struct rte_eth_dev *dev;
4945
4946 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4947
4948 if (l2_tunnel == NULL) {
4949 RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
4950 return -EINVAL;
4951 }
4952
4953 if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4954 RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
4955 return -EINVAL;
4956 }
4957
4958 if (mask == 0) {
4959 RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
4960 return -EINVAL;
4961 }
4962
4963 dev = &rte_eth_devices[port_id];
4964 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4965 -ENOTSUP);
4966 return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4967 l2_tunnel, mask, en));
4968}
4969
4970static void
4971rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4972 const struct rte_eth_desc_lim *desc_lim)
4973{
4974 if (desc_lim->nb_align != 0)
4975 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4976
4977 if (desc_lim->nb_max != 0)
4978 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4979
4980 *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4981}
4982
4983int
4984rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4985 uint16_t *nb_rx_desc,
4986 uint16_t *nb_tx_desc)
4987{
11fdf7f2 4988 struct rte_eth_dev_info dev_info;
f67539c2 4989 int ret;
4990
4991 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4992
4993 ret = rte_eth_dev_info_get(port_id, &dev_info);
4994 if (ret != 0)
4995 return ret;
4996
4997 if (nb_rx_desc != NULL)
4998 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4999
5000 if (nb_tx_desc != NULL)
5001 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
5002
5003 return 0;
5004}
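/*
 * Editor's sketch (not part of the upstream file): clamping requested ring
 * sizes to the device limits before queue setup. 1024 is an arbitrary
 * starting value.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) != 0)
 *		return;
 *	... pass nb_rxd to rte_eth_rx_queue_setup() and nb_txd to
 *	... rte_eth_tx_queue_setup() ...
 *
 * The helper rounds each value up to the driver's descriptor alignment and
 * then clamps it into the [nb_min, nb_max] range reported by the driver.
 */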
5005
5006int
5007rte_eth_dev_hairpin_capability_get(uint16_t port_id,
5008 struct rte_eth_hairpin_cap *cap)
5009{
5010 struct rte_eth_dev *dev;
5011
5012 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
5013
5014 dev = &rte_eth_devices[port_id];
5015 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
5016 memset(cap, 0, sizeof(*cap));
5017 return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
5018}
5019
5020int
5021rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5022{
5023 if (dev->data->rx_queue_state[queue_id] ==
5024 RTE_ETH_QUEUE_STATE_HAIRPIN)
5025 return 1;
5026 return 0;
5027}
5028
5029int
5030rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
5031{
5032 if (dev->data->tx_queue_state[queue_id] ==
5033 RTE_ETH_QUEUE_STATE_HAIRPIN)
5034 return 1;
5035 return 0;
5036}
5037
5038int
5039rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
5040{
5041 struct rte_eth_dev *dev;
5042
5043 RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
5044
5045 if (pool == NULL)
5046 return -EINVAL;
5047
5048 dev = &rte_eth_devices[port_id];
5049
5050 if (*dev->dev_ops->pool_ops_supported == NULL)
5051 return 1; /* all pools are supported */
5052
5053 return (*dev->dev_ops->pool_ops_supported)(dev, pool);
5054}
5055
5056/**
5057 * A set of values to describe the possible states of a switch domain.
5058 */
5059enum rte_eth_switch_domain_state {
5060 RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
5061 RTE_ETH_SWITCH_DOMAIN_ALLOCATED
5062};
5063
5064/**
5065 * Array of switch domains available for allocation. Array is sized to
5066 * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
5067 * ethdev ports in a single process.
5068 */
9f95a23c 5069static struct rte_eth_dev_switch {
5070 enum rte_eth_switch_domain_state state;
5071} rte_eth_switch_domains[RTE_MAX_ETHPORTS];
5072
f67539c2 5073int
5074rte_eth_switch_domain_alloc(uint16_t *domain_id)
5075{
5076 unsigned int i;
5077
5078 *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
5079
f67539c2 5080 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
5081 if (rte_eth_switch_domains[i].state ==
5082 RTE_ETH_SWITCH_DOMAIN_UNUSED) {
5083 rte_eth_switch_domains[i].state =
5084 RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
5085 *domain_id = i;
5086 return 0;
5087 }
5088 }
5089
5090 return -ENOSPC;
5091}
5092
f67539c2 5093int
5094rte_eth_switch_domain_free(uint16_t domain_id)
5095{
5096 if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
5097 domain_id >= RTE_MAX_ETHPORTS)
5098 return -EINVAL;
5099
5100 if (rte_eth_switch_domains[domain_id].state !=
5101 RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
5102 return -EINVAL;
5103
5104 rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
5105
5106 return 0;
5107}
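/*
 * Editor's sketch (not part of the upstream file): switch domain ids are
 * typically allocated by a PF/representor driver at probe time and released
 * at remove time, roughly as follows:
 *
 *	uint16_t domain_id;
 *
 *	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
 *		return -ENOSPC;
 *	... report domain_id for each related port via the driver's
 *	... dev_infos_get() switch information ...
 *	rte_eth_switch_domain_free(domain_id);
 *
 * Ports that share a domain id are understood by applications to belong to
 * the same embedded switch.
 */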
5108
5109static int
5110rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
5111{
5112 int state;
5113 struct rte_kvargs_pair *pair;
5114 char *letter;
5115
5116 arglist->str = strdup(str_in);
5117 if (arglist->str == NULL)
5118 return -ENOMEM;
5119
5120 letter = arglist->str;
5121 state = 0;
5122 arglist->count = 0;
5123 pair = &arglist->pairs[0];
5124 while (1) {
5125 switch (state) {
5126 case 0: /* Initial */
5127 if (*letter == '=')
5128 return -EINVAL;
5129 else if (*letter == '\0')
5130 return 0;
5131
5132 state = 1;
5133 pair->key = letter;
5134 /* fall-thru */
5135
5136 case 1: /* Parsing key */
5137 if (*letter == '=') {
5138 *letter = '\0';
5139 pair->value = letter + 1;
5140 state = 2;
5141 } else if (*letter == ',' || *letter == '\0')
5142 return -EINVAL;
5143 break;
5144
5145
5146 case 2: /* Parsing value */
5147 if (*letter == '[')
5148 state = 3;
5149 else if (*letter == ',') {
5150 *letter = '\0';
5151 arglist->count++;
5152 pair = &arglist->pairs[arglist->count];
5153 state = 0;
5154 } else if (*letter == '\0') {
5155 letter--;
5156 arglist->count++;
5157 pair = &arglist->pairs[arglist->count];
5158 state = 0;
5159 }
5160 break;
5161
5162 case 3: /* Parsing list */
5163 if (*letter == ']')
5164 state = 2;
5165 else if (*letter == '\0')
5166 return -EINVAL;
5167 break;
5168 }
5169 letter++;
5170 }
5171}
5172
f67539c2 5173int
5174rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
5175{
5176 struct rte_kvargs args;
5177 struct rte_kvargs_pair *pair;
5178 unsigned int i;
5179 int result = 0;
5180
5181 memset(eth_da, 0, sizeof(*eth_da));
5182
5183 result = rte_eth_devargs_tokenise(&args, dargs);
5184 if (result < 0)
5185 goto parse_cleanup;
5186
5187 for (i = 0; i < args.count; i++) {
5188 pair = &args.pairs[i];
5189 if (strcmp("representor", pair->key) == 0) {
5190 result = rte_eth_devargs_parse_list(pair->value,
5191 rte_eth_devargs_parse_representor_ports,
5192 eth_da);
5193 if (result < 0)
5194 goto parse_cleanup;
5195 }
5196 }
5197
5198parse_cleanup:
5199 if (args.str)
5200 free(args.str);
5201
5202 return result;
5203}
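/*
 * Editor's sketch (not part of the upstream file): the parser above handles
 * device argument strings of the form "representor=[0-3]" (the tokeniser
 * splits key/value pairs on '=' and ',', with '[' ... ']' grouping a list).
 * A bus driver would call it roughly like this:
 *
 *	struct rte_eth_devargs eth_da;
 *
 *	if (rte_eth_devargs_parse("representor=[0-3]", &eth_da) < 0)
 *		return -EINVAL;
 *	... eth_da now describes representor ports 0, 1, 2 and 3 ...
 */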
5204
5205static int
5206handle_port_list(const char *cmd __rte_unused,
5207 const char *params __rte_unused,
5208 struct rte_tel_data *d)
5209{
5210 int port_id;
5211
5212 rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
5213 RTE_ETH_FOREACH_DEV(port_id)
5214 rte_tel_data_add_array_int(d, port_id);
5215 return 0;
5216}
5217
5218static int
5219handle_port_xstats(const char *cmd __rte_unused,
5220 const char *params,
5221 struct rte_tel_data *d)
5222{
5223 struct rte_eth_xstat *eth_xstats;
5224 struct rte_eth_xstat_name *xstat_names;
5225 int port_id, num_xstats;
5226 int i, ret;
5227
5228 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5229 return -1;
5230
5231 port_id = atoi(params);
5232 if (!rte_eth_dev_is_valid_port(port_id))
5233 return -1;
5234
5235 num_xstats = rte_eth_xstats_get(port_id, NULL, 0);
5236 if (num_xstats < 0)
5237 return -1;
5238
5239 /* use one malloc for both names and stats */
5240 eth_xstats = malloc((sizeof(struct rte_eth_xstat) +
5241 sizeof(struct rte_eth_xstat_name)) * num_xstats);
5242 if (eth_xstats == NULL)
5243 return -1;
5244 xstat_names = (void *)&eth_xstats[num_xstats];
5245
5246 ret = rte_eth_xstats_get_names(port_id, xstat_names, num_xstats);
5247 if (ret < 0 || ret > num_xstats) {
5248 free(eth_xstats);
5249 return -1;
5250 }
5251
5252 ret = rte_eth_xstats_get(port_id, eth_xstats, num_xstats);
5253 if (ret < 0 || ret > num_xstats) {
5254 free(eth_xstats);
5255 return -1;
5256 }
5257
5258 rte_tel_data_start_dict(d);
5259 for (i = 0; i < num_xstats; i++)
5260 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
5261 eth_xstats[i].value);
5262 return 0;
5263}
5264
5265static int
5266handle_port_link_status(const char *cmd __rte_unused,
5267 const char *params,
5268 struct rte_tel_data *d)
5269{
5270 static const char *status_str = "status";
5271 int ret, port_id;
5272 struct rte_eth_link link;
5273
5274 if (params == NULL || strlen(params) == 0 || !isdigit(*params))
5275 return -1;
5276
5277 port_id = atoi(params);
5278 if (!rte_eth_dev_is_valid_port(port_id))
5279 return -1;
5280
5281 ret = rte_eth_link_get(port_id, &link);
5282 if (ret < 0)
5283 return -1;
5284
5285 rte_tel_data_start_dict(d);
5286 if (!link.link_status) {
5287 rte_tel_data_add_dict_string(d, status_str, "DOWN");
5288 return 0;
5289 }
5290 rte_tel_data_add_dict_string(d, status_str, "UP");
5291 rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
5292 rte_tel_data_add_dict_string(d, "duplex",
5293 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
5294 "full-duplex" : "half-duplex");
5295 return 0;
5296}
5297
5298RTE_INIT(ethdev_init_log)
5299{
5300 rte_eth_dev_logtype = rte_log_register("lib.ethdev");
5301 if (rte_eth_dev_logtype >= 0)
5302 rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
5303 rte_telemetry_register_cmd("/ethdev/list", handle_port_list,
5304 "Returns list of available ethdev ports. Takes no parameters");
5305 rte_telemetry_register_cmd("/ethdev/xstats", handle_port_xstats,
5306 "Returns the extended stats for a port. Parameters: int port_id");
5307 rte_telemetry_register_cmd("/ethdev/link_status",
5308 handle_port_link_status,
5309 "Returns the link status for a port. Parameters: int port_id");
11fdf7f2 5310}
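/*
 * Editor's note (not part of the upstream file): the three telemetry commands
 * registered above can be exercised from the dpdk-telemetry.py client shipped
 * with DPDK, e.g.:
 *
 *	--> /ethdev/list
 *	--> /ethdev/xstats,0
 *	--> /ethdev/link_status,0
 *
 * The integer after the comma is the port_id parsed with atoi() in the
 * handlers above.
 */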