]> git.proxmox.com Git - ceph.git/blame - ceph/src/spdk/dpdk/drivers/net/ring/rte_eth_ring.c
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / dpdk / drivers / net / ring / rte_eth_ring.c
CommitLineData
11fdf7f2
TL
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
7c673cae
FG
3 */
4
5#include "rte_eth_ring.h"
6#include <rte_mbuf.h>
11fdf7f2 7#include <rte_ethdev_driver.h>
7c673cae
FG
8#include <rte_malloc.h>
9#include <rte_memcpy.h>
7c673cae 10#include <rte_string_fns.h>
11fdf7f2 11#include <rte_bus_vdev.h>
7c673cae
FG
12#include <rte_kvargs.h>
13#include <rte_errno.h>
14
/* kvargs keys accepted on the vdev command line */
#define ETH_RING_NUMA_NODE_ACTION_ARG "nodeaction"
#define ETH_RING_ACTION_CREATE "CREATE"
#define ETH_RING_ACTION_ATTACH "ATTACH"
/* internal key used by rte_eth_from_rings() to smuggle a pointer to its args */
#define ETH_RING_INTERNAL_ARG "internal"

/* NULL-terminated list of keys rte_kvargs_parse() will accept */
static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	ETH_RING_INTERNAL_ARG,
	NULL
};
25
/*
 * Argument bundle built on the stack by rte_eth_from_rings() and handed to
 * the probe callback as a "%p"-formatted string under ETH_RING_INTERNAL_ARG.
 */
struct ring_internal_args {
	struct rte_ring * const *rx_queues;
	const unsigned int nb_rx_queues;
	struct rte_ring * const *tx_queues;
	const unsigned int nb_tx_queues;
	const unsigned int numa_node;
	void *addr; /* self addr for sanity check */
};
34
7c673cae
FG
/* whether the device's rings are created fresh or looked up by name */
enum dev_action {
	DEV_CREATE,
	DEV_ATTACH
};
39
/* per-queue state: backing ring plus RX/TX packet counters */
struct ring_queue {
	struct rte_ring *rng;
	rte_atomic64_t rx_pkts; /* bumped non-atomically when ring is SC-dequeue */
	rte_atomic64_t tx_pkts; /* bumped non-atomically when ring is SP-enqueue */
};
45
/* per-port private data, stored in rte_eth_dev_data->dev_private */
struct pmd_internals {
	unsigned int max_rx_queues;
	unsigned int max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct rte_ether_addr address; /* the port's single MAC address */
	enum dev_action action; /* DEV_CREATE rings are freed on remove */
};
56
/* template link state; dev start/stop only toggles link_status */
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};
63
/* dynamic log type, registered in eth_ring_init_log() at constructor time */
static int eth_ring_logtype;

/* driver-local logging helper; prefixes function name, appends newline */
#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
		"%s(): " fmt "\n", __func__, ##args)
7c673cae
FG
70static uint16_t
71eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
72{
73 void **ptrs = (void *)&bufs[0];
74 struct ring_queue *r = q;
75 const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
11fdf7f2 76 ptrs, nb_bufs, NULL);
7c673cae
FG
77 if (r->rng->flags & RING_F_SC_DEQ)
78 r->rx_pkts.cnt += nb_rx;
79 else
80 rte_atomic64_add(&(r->rx_pkts), nb_rx);
81 return nb_rx;
82}
83
84static uint16_t
85eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
86{
87 void **ptrs = (void *)&bufs[0];
88 struct ring_queue *r = q;
89 const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
11fdf7f2 90 ptrs, nb_bufs, NULL);
f67539c2 91 if (r->rng->flags & RING_F_SP_ENQ)
7c673cae 92 r->tx_pkts.cnt += nb_tx;
f67539c2 93 else
7c673cae 94 rte_atomic64_add(&(r->tx_pkts), nb_tx);
7c673cae
FG
95 return nb_tx;
96}
97
98static int
99eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
100
101static int
102eth_dev_start(struct rte_eth_dev *dev)
103{
104 dev->data->dev_link.link_status = ETH_LINK_UP;
105 return 0;
106}
107
108static void
109eth_dev_stop(struct rte_eth_dev *dev)
110{
111 dev->data->dev_link.link_status = ETH_LINK_DOWN;
112}
113
114static int
115eth_dev_set_link_down(struct rte_eth_dev *dev)
116{
117 dev->data->dev_link.link_status = ETH_LINK_DOWN;
118 return 0;
119}
120
121static int
122eth_dev_set_link_up(struct rte_eth_dev *dev)
123{
124 dev->data->dev_link.link_status = ETH_LINK_UP;
125 return 0;
126}
127
128static int
129eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
130 uint16_t nb_rx_desc __rte_unused,
131 unsigned int socket_id __rte_unused,
132 const struct rte_eth_rxconf *rx_conf __rte_unused,
133 struct rte_mempool *mb_pool __rte_unused)
134{
135 struct pmd_internals *internals = dev->data->dev_private;
9f95a23c 136
7c673cae
FG
137 dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
138 return 0;
139}
140
141static int
142eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
143 uint16_t nb_tx_desc __rte_unused,
144 unsigned int socket_id __rte_unused,
145 const struct rte_eth_txconf *tx_conf __rte_unused)
146{
147 struct pmd_internals *internals = dev->data->dev_private;
9f95a23c 148
7c673cae
FG
149 dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
150 return 0;
151}
152
153
f67539c2 154static int
7c673cae 155eth_dev_info(struct rte_eth_dev *dev,
9f95a23c 156 struct rte_eth_dev_info *dev_info)
7c673cae
FG
157{
158 struct pmd_internals *internals = dev->data->dev_private;
9f95a23c 159
7c673cae
FG
160 dev_info->max_mac_addrs = 1;
161 dev_info->max_rx_pktlen = (uint32_t)-1;
162 dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
163 dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
164 dev_info->min_rx_bufsize = 0;
f67539c2
TL
165
166 return 0;
7c673cae
FG
167}
168
11fdf7f2 169static int
7c673cae
FG
170eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
171{
9f95a23c 172 unsigned int i;
f67539c2 173 unsigned long rx_total = 0, tx_total = 0;
7c673cae
FG
174 const struct pmd_internals *internal = dev->data->dev_private;
175
176 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
177 i < dev->data->nb_rx_queues; i++) {
178 stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
179 rx_total += stats->q_ipackets[i];
180 }
181
182 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
183 i < dev->data->nb_tx_queues; i++) {
184 stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
7c673cae 185 tx_total += stats->q_opackets[i];
7c673cae
FG
186 }
187
188 stats->ipackets = rx_total;
189 stats->opackets = tx_total;
11fdf7f2
TL
190
191 return 0;
7c673cae
FG
192}
193
f67539c2 194static int
7c673cae
FG
195eth_stats_reset(struct rte_eth_dev *dev)
196{
9f95a23c 197 unsigned int i;
7c673cae 198 struct pmd_internals *internal = dev->data->dev_private;
9f95a23c 199
7c673cae
FG
200 for (i = 0; i < dev->data->nb_rx_queues; i++)
201 internal->rx_ring_queues[i].rx_pkts.cnt = 0;
f67539c2 202 for (i = 0; i < dev->data->nb_tx_queues; i++)
7c673cae 203 internal->tx_ring_queues[i].tx_pkts.cnt = 0;
f67539c2
TL
204
205 return 0;
7c673cae
FG
206}
207
208static void
209eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
210 uint32_t index __rte_unused)
211{
212}
213
11fdf7f2 214static int
7c673cae 215eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
f67539c2 216 struct rte_ether_addr *mac_addr __rte_unused,
7c673cae
FG
217 uint32_t index __rte_unused,
218 uint32_t vmdq __rte_unused)
219{
11fdf7f2 220 return 0;
7c673cae
FG
221}
222
/* queues live inside pmd_internals; there is nothing to free per-queue */
static void
eth_queue_release(void *q __rte_unused) { ; }
/* link state is fixed (see pmd_link); nothing to poll */
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }
228
/* ethdev control-path operations installed on every ring port */
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};
246
/*
 * Core device constructor: allocate an ethdev entry plus its private data
 * and wire the caller-supplied rings in as RX/TX queues.
 *
 * On success returns the new port id (>= 0) and stores the ethdev in
 * *eth_dev_p; on failure returns -1 with rte_errno set and frees whatever
 * was allocated. The rings themselves are owned by the caller.
 */
static int
do_eth_dev_ring_create(const char *name,
		struct rte_vdev_device *vdev,
		struct rte_ring * const rx_queues[],
		const unsigned int nb_rx_queues,
		struct rte_ring *const tx_queues[],
		const unsigned int nb_tx_queues,
		const unsigned int numa_node, enum dev_action action,
		struct rte_eth_dev **eth_dev_p)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	void **rx_queues_local = NULL;
	void **tx_queues_local = NULL;
	unsigned int i;

	PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
			numa_node);

	/* queue pointer arrays handed over to eth_dev->data below */
	rx_queues_local = rte_calloc_socket(name, nb_rx_queues,
			sizeof(void *), 0, numa_node);
	if (rx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	tx_queues_local = rte_calloc_socket(name, nb_tx_queues,
			sizeof(void *), 0, numa_node);
	if (tx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store EAL device in eth_dev,
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	eth_dev->device = &vdev->device;

	data = eth_dev->data;
	data->rx_queues = rx_queues_local;
	data->tx_queues = tx_queues_local;

	/* NOTE: callers guarantee nb_rx/tx_queues fit the fixed arrays */
	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	/* MAC address lives inside internals; see matching NULL-out in remove */
	data->mac_addrs = &internals->address;
	data->promiscuous = 1;
	data->all_multicast = 1;

	eth_dev->dev_ops = &ops;
	data->kdrv = RTE_KDRV_NONE;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	rte_eth_dev_probing_finish(eth_dev);
	*eth_dev_p = eth_dev;

	return data->port_id;

error:
	/* rte_free(NULL) is a no-op, so partial allocation is safe here */
	rte_free(rx_queues_local);
	rte_free(tx_queues_local);
	rte_free(internals);

	return -1;
}
348
349int
350rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
9f95a23c 351 const unsigned int nb_rx_queues,
7c673cae 352 struct rte_ring *const tx_queues[],
9f95a23c
TL
353 const unsigned int nb_tx_queues,
354 const unsigned int numa_node)
7c673cae 355{
11fdf7f2
TL
356 struct ring_internal_args args = {
357 .rx_queues = rx_queues,
358 .nb_rx_queues = nb_rx_queues,
359 .tx_queues = tx_queues,
360 .nb_tx_queues = nb_tx_queues,
361 .numa_node = numa_node,
362 .addr = &args,
363 };
9f95a23c
TL
364 char args_str[32];
365 char ring_name[RTE_RING_NAMESIZE];
11fdf7f2
TL
366 uint16_t port_id = RTE_MAX_ETHPORTS;
367 int ret;
368
7c673cae
FG
369 /* do some parameter checking */
370 if (rx_queues == NULL && nb_rx_queues > 0) {
371 rte_errno = EINVAL;
372 return -1;
373 }
374 if (tx_queues == NULL && nb_tx_queues > 0) {
375 rte_errno = EINVAL;
376 return -1;
377 }
378 if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
379 rte_errno = EINVAL;
380 return -1;
381 }
382
9f95a23c
TL
383 snprintf(args_str, sizeof(args_str), "%s=%p",
384 ETH_RING_INTERNAL_ARG, &args);
385
386 ret = snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);
387 if (ret >= (int)sizeof(ring_name)) {
388 rte_errno = ENAMETOOLONG;
389 return -1;
390 }
11fdf7f2
TL
391
392 ret = rte_vdev_init(ring_name, args_str);
393 if (ret) {
394 rte_errno = EINVAL;
395 return -1;
396 }
397
9f95a23c
TL
398 ret = rte_eth_dev_get_port_by_name(ring_name, &port_id);
399 if (ret) {
400 rte_errno = ENODEV;
401 return -1;
402 }
11fdf7f2
TL
403
404 return port_id;
7c673cae
FG
405}
406
/*
 * Convenience wrapper: single-queue ethdev where the same ring carries both
 * RX and TX, named after the ring, on the ring's NUMA socket when known.
 */
int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}
413
414static int
f67539c2
TL
415eth_dev_ring_create(const char *name,
416 struct rte_vdev_device *vdev,
417 const unsigned int numa_node,
11fdf7f2 418 enum dev_action action, struct rte_eth_dev **eth_dev)
7c673cae
FG
419{
420 /* rx and tx are so-called from point of view of first port.
421 * They are inverted from the point of view of second port
422 */
423 struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
9f95a23c 424 unsigned int i;
7c673cae 425 char rng_name[RTE_RING_NAMESIZE];
9f95a23c 426 unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
7c673cae
FG
427 RTE_PMD_RING_MAX_TX_RINGS);
428
429 for (i = 0; i < num_rings; i++) {
9f95a23c
TL
430 int cc;
431
432 cc = snprintf(rng_name, sizeof(rng_name),
433 "ETH_RXTX%u_%s", i, name);
434 if (cc >= (int)sizeof(rng_name)) {
435 rte_errno = ENAMETOOLONG;
436 return -1;
437 }
438
7c673cae
FG
439 rxtx[i] = (action == DEV_CREATE) ?
440 rte_ring_create(rng_name, 1024, numa_node,
441 RING_F_SP_ENQ|RING_F_SC_DEQ) :
442 rte_ring_lookup(rng_name);
443 if (rxtx[i] == NULL)
444 return -1;
445 }
446
f67539c2 447 if (do_eth_dev_ring_create(name, vdev, rxtx, num_rings, rxtx, num_rings,
11fdf7f2 448 numa_node, action, eth_dev) < 0)
7c673cae
FG
449 return -1;
450
451 return 0;
452}
453
/* one parsed "name:node:action" triple from the nodeaction kvarg */
struct node_action_pair {
	char name[PATH_MAX];
	unsigned int node; /* NUMA socket to place the rings on */
	enum dev_action action;
};
459
/* growable-by-count list of node_action_pair entries; list points at
 * trailing storage allocated together with this header (see probe)
 */
struct node_action_list {
	unsigned int total; /* capacity of list[] */
	unsigned int count; /* entries filled so far */
	struct node_action_pair *list;
};
465
9f95a23c
TL
466static int parse_kvlist(const char *key __rte_unused,
467 const char *value, void *data)
7c673cae
FG
468{
469 struct node_action_list *info = data;
470 int ret;
471 char *name;
472 char *action;
473 char *node;
474 char *end;
475
476 name = strdup(value);
477
478 ret = -EINVAL;
479
480 if (!name) {
11fdf7f2 481 PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
7c673cae
FG
482 goto out;
483 }
484
485 node = strchr(name, ':');
486 if (!node) {
11fdf7f2
TL
487 PMD_LOG(WARNING, "could not parse node value from %s",
488 name);
7c673cae
FG
489 goto out;
490 }
491
492 *node = '\0';
493 node++;
494
495 action = strchr(node, ':');
496 if (!action) {
11fdf7f2
TL
497 PMD_LOG(WARNING, "could not parse action value from %s",
498 node);
7c673cae
FG
499 goto out;
500 }
501
502 *action = '\0';
503 action++;
504
505 /*
506 * Need to do some sanity checking here
507 */
508
509 if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
510 info->list[info->count].action = DEV_ATTACH;
511 else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
512 info->list[info->count].action = DEV_CREATE;
513 else
514 goto out;
515
516 errno = 0;
517 info->list[info->count].node = strtol(node, &end, 10);
518
519 if ((errno != 0) || (*end != '\0')) {
11fdf7f2
TL
520 PMD_LOG(WARNING,
521 "node value %s is unparseable as a number", node);
7c673cae
FG
522 goto out;
523 }
524
9f95a23c
TL
525 strlcpy(info->list[info->count].name, name,
526 sizeof(info->list[info->count].name));
7c673cae
FG
527
528 info->count++;
529
530 ret = 0;
531out:
532 free(name);
533 return ret;
534}
535
/*
 * kvargs handler for ETH_RING_INTERNAL_ARG: decode the "%p"-formatted
 * string back into a struct ring_internal_args pointer. The struct's own
 * addr field must round-trip to the same pointer, guarding against a
 * stale or corrupted argument string. Returns 0 on success, -1 otherwise.
 */
static int
parse_internal_args(const char *key __rte_unused, const char *value,
		void *data)
{
	struct ring_internal_args **internal_args = data;
	void *args;

	sscanf(value, "%p", &args);

	*internal_args = args;

	/* self-address sanity check (see rte_eth_from_rings) */
	if ((*internal_args)->addr != args)
		return -1;

	return 0;
}
552
553static int
554rte_pmd_ring_probe(struct rte_vdev_device *dev)
7c673cae 555{
11fdf7f2 556 const char *name, *params;
7c673cae
FG
557 struct rte_kvargs *kvlist = NULL;
558 int ret = 0;
559 struct node_action_list *info = NULL;
11fdf7f2
TL
560 struct rte_eth_dev *eth_dev = NULL;
561 struct ring_internal_args *internal_args;
562
563 name = rte_vdev_device_name(dev);
564 params = rte_vdev_device_args(dev);
7c673cae 565
11fdf7f2 566 PMD_LOG(INFO, "Initializing pmd_ring for %s", name);
7c673cae
FG
567
568 if (params == NULL || params[0] == '\0') {
f67539c2 569 ret = eth_dev_ring_create(name, dev, rte_socket_id(), DEV_CREATE,
11fdf7f2 570 &eth_dev);
7c673cae 571 if (ret == -1) {
11fdf7f2
TL
572 PMD_LOG(INFO,
573 "Attach to pmd_ring for %s", name);
f67539c2 574 ret = eth_dev_ring_create(name, dev, rte_socket_id(),
11fdf7f2 575 DEV_ATTACH, &eth_dev);
7c673cae 576 }
11fdf7f2 577 } else {
7c673cae
FG
578 kvlist = rte_kvargs_parse(params, valid_arguments);
579
580 if (!kvlist) {
9f95a23c
TL
581 PMD_LOG(INFO,
582 "Ignoring unsupported parameters when creatingrings-backed ethernet device");
f67539c2 583 ret = eth_dev_ring_create(name, dev, rte_socket_id(),
11fdf7f2 584 DEV_CREATE, &eth_dev);
7c673cae 585 if (ret == -1) {
11fdf7f2
TL
586 PMD_LOG(INFO,
587 "Attach to pmd_ring for %s",
7c673cae 588 name);
f67539c2 589 ret = eth_dev_ring_create(name, dev, rte_socket_id(),
11fdf7f2 590 DEV_ATTACH, &eth_dev);
7c673cae 591 }
11fdf7f2 592
7c673cae 593 return ret;
11fdf7f2
TL
594 }
595
596 if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
597 ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
598 parse_internal_args,
599 &internal_args);
600 if (ret < 0)
601 goto out_free;
602
f67539c2 603 ret = do_eth_dev_ring_create(name, dev,
11fdf7f2
TL
604 internal_args->rx_queues,
605 internal_args->nb_rx_queues,
606 internal_args->tx_queues,
607 internal_args->nb_tx_queues,
608 internal_args->numa_node,
609 DEV_ATTACH,
610 &eth_dev);
611 if (ret >= 0)
612 ret = 0;
7c673cae
FG
613 } else {
614 ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
615 info = rte_zmalloc("struct node_action_list",
616 sizeof(struct node_action_list) +
617 (sizeof(struct node_action_pair) * ret),
618 0);
619 if (!info)
620 goto out_free;
621
622 info->total = ret;
9f95a23c 623 info->list = (struct node_action_pair *)(info + 1);
7c673cae
FG
624
625 ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
626 parse_kvlist, info);
627
628 if (ret < 0)
629 goto out_free;
630
631 for (info->count = 0; info->count < info->total; info->count++) {
632 ret = eth_dev_ring_create(info->list[info->count].name,
f67539c2 633 dev,
7c673cae 634 info->list[info->count].node,
11fdf7f2
TL
635 info->list[info->count].action,
636 &eth_dev);
7c673cae
FG
637 if ((ret == -1) &&
638 (info->list[info->count].action == DEV_CREATE)) {
11fdf7f2
TL
639 PMD_LOG(INFO,
640 "Attach to pmd_ring for %s",
7c673cae 641 name);
f67539c2 642 ret = eth_dev_ring_create(name, dev,
7c673cae 643 info->list[info->count].node,
11fdf7f2
TL
644 DEV_ATTACH,
645 &eth_dev);
7c673cae
FG
646 }
647 }
648 }
649 }
650
651out_free:
652 rte_kvargs_free(kvlist);
653 rte_free(info);
654 return ret;
655}
656
/*
 * vdev remove entry point: stop the port, free rings we created
 * (DEV_CREATE only), and release the ethdev entry.
 * Returns 0, -EINVAL on missing name, or -ENODEV if no such port.
 */
static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;

	PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	/* take the link down before tearing the rings out from under it */
	eth_dev_stop(eth_dev);

	internals = eth_dev->data->dev_private;
	if (internals->action == DEV_CREATE) {
		/*
		 * it is only necessary to delete the rings in rx_queues because
		 * they are the same used in tx_queues
		 */
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
			r = eth_dev->data->rx_queues[i];
			rte_ring_free(r->rng);
		}
	}

	/* mac_addrs must not be freed alone because part of dev_private */
	eth_dev->data->mac_addrs = NULL;
	rte_eth_dev_release_port(eth_dev);
	return 0;
}
695
/* vdev driver hooks for the ring PMD */
static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

/* register as "net_ring", keeping the legacy "eth_ring" alias */
RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");

/* constructor: register the PMD's dynamic log type, default level NOTICE */
RTE_INIT(eth_ring_init_log)
{
	eth_ring_logtype = rte_log_register("pmd.net.ring");
	if (eth_ring_logtype >= 0)
		rte_log_set_level(eth_ring_logtype, RTE_LOG_NOTICE);
}