/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"

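/*
 * Virtual PMD used by the unit tests: a software ethdev whose receive and
 * transmit paths are backed by rte_rings instead of hardware. Tests inject
 * mbufs into the rx ring, collect what the device "transmitted" from the
 * tx ring, and can swap individual dev_ops between the *_success and
 * *_fail implementations below to exercise error handling.
 */
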
#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

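/*
 * Per-device private data. Each port gets its own copy of the ops table so
 * individual callbacks can be repointed at success/fail implementations
 * independently; rx_queue and tx_queue stand in for the device's receive
 * and transmit paths, and tx_burst_fail_count drives partial-failure
 * injection in virtual_ethdev_tx_burst_fail().
 */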
struct virtual_ethdev_private {
        struct eth_dev_ops dev_ops;
        struct rte_eth_stats eth_stats;

        struct rte_ring *rx_queue;
        struct rte_ring *tx_queue;

        int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
        int port_id;
        int queue_id;
};

static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
        eth_dev->data->dev_started = 1;

        return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
        eth_dev->data->dev_started = 0;

        return -1;
}

static void
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
        void *pkt = NULL;
        struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
        eth_dev->data->dev_started = 0;

        /* drain and free any packets still sitting on the rx/tx rings */
        while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
                rte_pktmbuf_free(pkt);

        while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)
                rte_pktmbuf_free(pkt);
}

static void
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
        return -1;
}

static void
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
                struct rte_eth_dev_info *dev_info)
{
        dev_info->driver_name = virtual_ethdev_driver_name;
        dev_info->max_mac_addrs = 1;

        dev_info->max_rx_pktlen = (uint32_t)2048;

        dev_info->max_rx_queues = (uint16_t)128;
        dev_info->max_tx_queues = (uint16_t)512;

        dev_info->min_rx_bufsize = 0;
        dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
                uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool __rte_unused)
{
        struct virtual_ethdev_queue *rx_q;

        rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
                        sizeof(struct virtual_ethdev_queue), 0, socket_id);

        if (rx_q == NULL)
                return -1;

        rx_q->port_id = dev->data->port_id;
        rx_q->queue_id = rx_queue_id;

        dev->data->rx_queues[rx_queue_id] = rx_q;

        return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
                uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool __rte_unused)
{
        return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
                uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct virtual_ethdev_queue *tx_q;

        tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
                        sizeof(struct virtual_ethdev_queue), 0, socket_id);

        if (tx_q == NULL)
                return -1;

        tx_q->port_id = dev->data->port_id;
        tx_q->queue_id = tx_queue_id;

        dev->data->tx_queues[tx_queue_id] = tx_q;

        return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
                uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
                int wait_to_complete __rte_unused)
{
        if (!bonded_eth_dev->data->dev_started)
                bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return -1;
}

static int
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct virtual_ethdev_private *dev_private = dev->data->dev_private;

        if (stats)
                rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));

        return 0;
}

static void
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
        struct virtual_ethdev_private *dev_private = dev->data->dev_private;
        void *pkt = NULL;

        /* rte_ring_dequeue() returns -ENOENT once the ring is empty, so
         * drain the tx ring and free each queued packet until then
         */
        while (rte_ring_dequeue(dev_private->tx_queue, &pkt) != -ENOENT)
                rte_pktmbuf_free(pkt);

        /* Reset internal statistics */
        memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));
}

static void
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{}

static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_mac_address_set(__rte_unused struct rte_eth_dev *dev,
                __rte_unused struct ether_addr *addr)
{
        return 0;
}

static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
        .dev_configure = virtual_ethdev_configure_success,
        .dev_start = virtual_ethdev_start_success,
        .dev_stop = virtual_ethdev_stop,
        .dev_close = virtual_ethdev_close,
        .dev_infos_get = virtual_ethdev_info_get,
        .rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
        .tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
        .rx_queue_release = virtual_ethdev_rx_queue_release,
        .tx_queue_release = virtual_ethdev_tx_queue_release,
        .link_update = virtual_ethdev_link_update_success,
        .mac_addr_set = virtual_ethdev_mac_address_set,
        .stats_get = virtual_ethdev_stats_get,
        .stats_reset = virtual_ethdev_stats_reset,
        .promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
        .promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};
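
/*
 * Each virtual_ethdev_*_fn_set_success() helper below repoints one entry
 * in the per-device copy of this ops table. A minimal sketch of how a test
 * might use them (port_id is assumed to come from virtual_ethdev_create()):
 *
 *      virtual_ethdev_start_fn_set_success(port_id, 0);
 *      if (rte_eth_dev_start(port_id) == 0)
 *              printf("error: start unexpectedly succeeded\n");
 *
 *      virtual_ethdev_start_fn_set_success(port_id, 1);
 *      if (rte_eth_dev_start(port_id) != 0)
 *              printf("error: start failed\n");
 */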

void
virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct virtual_ethdev_private *dev_private = dev->data->dev_private;
        struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

        if (success)
                dev_ops->dev_start = virtual_ethdev_start_success;
        else
                dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct virtual_ethdev_private *dev_private = dev->data->dev_private;
        struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

        if (success)
                dev_ops->dev_configure = virtual_ethdev_configure_success;
        else
                dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct virtual_ethdev_private *dev_private = dev->data->dev_private;
        struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

        if (success)
                dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_success;
        else
                dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct virtual_ethdev_private *dev_private = dev->data->dev_private;
        struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

        if (success)
                dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_success;
        else
                dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        struct virtual_ethdev_private *dev_private = dev->data->dev_private;
        struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

        if (success)
                dev_ops->link_update = virtual_ethdev_link_update_success;
        else
                dev_ops->link_update = virtual_ethdev_link_update_fail;
}

static uint16_t
virtual_ethdev_rx_burst_success(void *queue __rte_unused,
                struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct rte_eth_dev *vrtl_eth_dev;
        struct virtual_ethdev_queue *pq_map;
        struct virtual_ethdev_private *dev_private;

        int rx_count, i;

        pq_map = (struct virtual_ethdev_queue *)queue;
        vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
        dev_private = vrtl_eth_dev->data->dev_private;

        rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
                        nb_pkts, NULL);

        /* increments ipackets count */
        dev_private->eth_stats.ipackets += rx_count;

        /* increments ibytes count */
        for (i = 0; i < rx_count; i++)
                dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

        return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
                struct rte_mbuf **bufs __rte_unused,
                uint16_t nb_pkts __rte_unused)
{
        return 0;
}
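
/*
 * Transmit path: while the link is up, "transmitted" mbufs are parked on
 * the internal tx ring rather than freed, so a test can pull them back out
 * with virtual_ethdev_get_mbufs_from_tx_queue() and verify exactly what
 * the device was asked to send. With the link down, the burst is reported
 * as 0 and the caller keeps its mbufs.
 */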
static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct virtual_ethdev_queue *tx_q = queue;

        struct rte_eth_dev *vrtl_eth_dev;
        struct virtual_ethdev_private *dev_private;

        int i;

        vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
        dev_private = vrtl_eth_dev->data->dev_private;

        if (!vrtl_eth_dev->data->dev_link.link_status)
                nb_pkts = 0;
        else
                nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue,
                                (void **)bufs, nb_pkts, NULL);

        /* increment opacket count */
        dev_private->eth_stats.opackets += nb_pkts;

        /* increment obytes count */
        for (i = 0; i < nb_pkts; i++)
                dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

        return nb_pkts;
}

static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
                uint16_t nb_pkts)
{
        struct rte_eth_dev *vrtl_eth_dev = NULL;
        struct virtual_ethdev_queue *tx_q = NULL;
        struct virtual_ethdev_private *dev_private = NULL;

        int i;

        tx_q = queue;
        vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
        dev_private = vrtl_eth_dev->data->dev_private;

        if (dev_private->tx_burst_fail_count < nb_pkts) {
                int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

                /* increment opacket count */
                dev_private->eth_stats.opackets += successfully_txd;

                /* free the "successfully transmitted" packets in the burst */
                for (i = 0; i < successfully_txd; i++) {
                        if (bufs[i] != NULL)
                                rte_pktmbuf_free(bufs[i]);

                        bufs[i] = NULL;
                }

                return successfully_txd;
        }

        return 0;
}
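
/*
 * A worked example of the fail-count semantics above, as a test might use
 * them (port_id and pkts are assumed to be set up elsewhere): with the
 * fail path installed and tx_burst_fail_count set to 2, a burst of 10
 * mbufs reports 8 as transmitted and frees those 8, while the caller
 * keeps ownership of the 2 "failed" mbufs at the tail of the burst.
 *
 *      virtual_ethdev_tx_burst_fn_set_success(port_id, 0);
 *      virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(port_id, 2);
 *      sent = rte_eth_tx_burst(port_id, 0, pkts, 10);    (sent == 8)
 *
 * If the fail count is greater than or equal to the burst size, the whole
 * burst is rejected and 0 is returned.
 */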

void
virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        if (success)
                vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
        else
                vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
        struct virtual_ethdev_private *dev_private = NULL;
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        dev_private = vrtl_eth_dev->data->dev_private;

        if (success)
                vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
        else
                vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

        dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
                uint8_t packet_fail_count)
{
        struct virtual_ethdev_private *dev_private = NULL;
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        dev_private = vrtl_eth_dev->data->dev_private;
        dev_private->tx_burst_fail_count = packet_fail_count;
}

void
virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        vrtl_eth_dev->data->dev_link.link_status = link_status;
}

void
virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
                uint8_t link_status)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        vrtl_eth_dev->data->dev_link.link_status = link_status;

        _rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC,
                        NULL);
}
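
/*
 * Sketch of driving a link-state change from a test, assuming the port was
 * created with isr_support set (so RTE_PCI_DRV_INTR_LSC is advertised) and
 * an RTE_ETH_EVENT_INTR_LSC callback has been registered on the port:
 *
 *      virtual_ethdev_simulate_link_status_interrupt(port_id, 0);
 *
 * This flips the link to down and synchronously invokes the registered
 * callbacks through _rte_eth_dev_callback_process().
 */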

int
virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
                struct rte_mbuf **pkt_burst, int burst_length)
{
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
        struct virtual_ethdev_private *dev_private =
                        vrtl_eth_dev->data->dev_private;

        return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
                        burst_length, NULL);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
                struct rte_mbuf **pkt_burst, int burst_length)
{
        struct virtual_ethdev_private *dev_private;
        struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

        dev_private = vrtl_eth_dev->data->dev_private;
        return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
                        burst_length, NULL);
}
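
/*
 * Round-trip sketch tying the two helpers above to the burst functions
 * (mbuf allocation and queue setup omitted; pkts_in/pkts_out are assumed
 * arrays of mbuf pointers): packets pushed into the rx ring come back out
 * of rte_eth_rx_burst(), and, with the link up, packets handed to
 * rte_eth_tx_burst() can be recovered from the tx ring.
 *
 *      virtual_ethdev_add_mbufs_to_rx_queue(port_id, pkts_in, n);
 *      nb_rx = rte_eth_rx_burst(port_id, 0, pkts_out, MAX_PKT_BURST);
 *
 *      nb_tx = rte_eth_tx_burst(port_id, 0, pkts_in, n);
 *      got = virtual_ethdev_get_mbufs_from_tx_queue(port_id, pkts_out,
 *                      MAX_PKT_BURST);
 */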

int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
                uint8_t socket_id, uint8_t isr_support)
{
        struct rte_pci_device *pci_dev = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct rte_pci_driver *pci_drv = NULL;
        struct rte_pci_id *id_table = NULL;
        struct virtual_ethdev_private *dev_private = NULL;
        char name_buf[RTE_RING_NAMESIZE];

        /* now do all data allocation - for eth_dev structure, dummy pci driver
         * and internal (dev_private) data
         */

        pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
        if (pci_dev == NULL)
                goto err;

        pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
        if (pci_drv == NULL)
                goto err;

        id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
        if (id_table == NULL)
                goto err;
        id_table->device_id = 0xBEEF;

        dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
        if (dev_private == NULL)
                goto err;

        snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
        dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST,
                        socket_id, 0);
        if (dev_private->rx_queue == NULL)
                goto err;

        snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
        dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST,
                        socket_id, 0);
        if (dev_private->tx_queue == NULL)
                goto err;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL)
                goto err;

        pci_dev->device.numa_node = socket_id;
        pci_dev->device.name = eth_dev->data->name;
        pci_drv->driver.name = virtual_ethdev_driver_name;
        pci_drv->id_table = id_table;

        if (isr_support)
                pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
        else
                pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

        eth_dev->device = &pci_dev->device;
        eth_dev->device->driver = &pci_drv->driver;

        eth_dev->data->nb_rx_queues = (uint16_t)1;
        eth_dev->data->nb_tx_queues = (uint16_t)1;

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
        eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

        eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL)
                goto err;

        memcpy(eth_dev->data->mac_addrs, mac_addr,
                        sizeof(*eth_dev->data->mac_addrs));

        eth_dev->data->dev_started = 0;
        eth_dev->data->promiscuous = 0;
        eth_dev->data->scattered_rx = 0;
        eth_dev->data->all_multicast = 0;

        eth_dev->data->dev_private = dev_private;

        /* Copy default device operation functions */
        dev_private->dev_ops = virtual_ethdev_default_dev_ops;
        eth_dev->dev_ops = &dev_private->dev_ops;

        eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
        eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

        rte_eth_dev_probing_finish(eth_dev);

        return eth_dev->data->port_id;

err:
        rte_free(pci_dev);
        rte_free(pci_drv);
        rte_free(id_table);
        if (dev_private != NULL) {
                /* rte_ring_free() tolerates NULL, so this is safe even when
                 * only one (or neither) of the rings was created
                 */
                rte_ring_free(dev_private->rx_queue);
                rte_ring_free(dev_private->tx_queue);
        }
        rte_free(dev_private);

        return -1;
}
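
/*
 * Typical creation from a test (illustrative; the name and MAC value are
 * arbitrary, and rte_socket_id() is assumed to fit the uint8_t socket
 * parameter):
 *
 *      struct ether_addr mac = { .addr_bytes = { 0, 1, 2, 3, 4, 5 } };
 *      int port_id = virtual_ethdev_create("net_virt_0", &mac,
 *                      rte_socket_id(), 1);
 *
 * A negative return means allocation failed; otherwise port_id can be
 * configured and started through the normal ethdev API.
 */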