/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 * All rights reserved.
 */

#include <time.h>

#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <unistd.h>

#if defined(RTE_EXEC_ENV_FREEBSD)
#include <sys/sysctl.h>
#include <net/if_dl.h>
#endif

#include <pcap.h>

#include <rte_cycles.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>

#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1

#define ETH_PCAP_RX_PCAP_ARG  "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG  "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG    "iface"
#define ETH_PCAP_PHY_MAC_ARG  "phy_mac"

#define ETH_PCAP_ARG_MAXLEN 64

#define RTE_PMD_PCAP_MAX_QUEUES 16
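
/*
 * Usage note (added for clarity, not part of the upstream source): the PMD
 * is attached via EAL vdev arguments built from the devarg keys above.
 * Typical invocations, per the DPDK pcap PMD guide, look like:
 *
 *   --vdev 'net_pcap0,rx_pcap=input.pcap,tx_pcap=output.pcap'
 *   --vdev 'net_pcap0,iface=eth0'
 *   --vdev 'net_pcap0,rx_iface_in=eth0,tx_iface=eth0'
 *
 * File names and interface names above are illustrative placeholders.
 */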

static char errbuf[PCAP_ERRBUF_SIZE];
static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;
static uint8_t iface_idx;

struct queue_stat {
	volatile unsigned long pkts;
	volatile unsigned long bytes;
	volatile unsigned long err_pkts;
};

struct pcap_rx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mempool *mb_pool;
	struct queue_stat rx_stat;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];
};

struct pcap_tx_queue {
	uint16_t port_id;
	uint16_t queue_id;
	struct queue_stat tx_stat;
	char name[PATH_MAX];
	char type[ETH_PCAP_ARG_MAXLEN];
};

struct pmd_internals {
	struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
	char devargs[ETH_PCAP_ARG_MAXLEN];
	struct ether_addr eth_addr;
	int if_index;
	int single_iface;
	int phy_mac;
};

struct pmd_process_private {
	pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
	pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
};
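
/*
 * Design note (added for clarity): pcap_t and pcap_dumper_t are plain
 * process-local pointers (libpcap handles wrapping a file descriptor or
 * FILE *), so they cannot live in the shared dev_private area. They are
 * kept in process_private instead, and each process opens its own handles;
 * see the RTE_PROC_SECONDARY path in pmd_pcap_probe() below.
 */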

struct pmd_devargs {
	unsigned int num_of_queue;
	struct devargs_queue {
		pcap_dumper_t *dumper;
		pcap_t *pcap;
		const char *name;
		const char *type;
	} queue[RTE_PMD_PCAP_MAX_QUEUES];
	int phy_mac;
};

static const char *valid_arguments[] = {
	ETH_PCAP_RX_PCAP_ARG,
	ETH_PCAP_TX_PCAP_ARG,
	ETH_PCAP_RX_IFACE_ARG,
	ETH_PCAP_RX_IFACE_IN_ARG,
	ETH_PCAP_TX_IFACE_ARG,
	ETH_PCAP_IFACE_ARG,
	ETH_PCAP_PHY_MAC_ARG,
	NULL
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static int eth_pcap_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
		const u_char *data, uint16_t data_len)
{
	/* Copy the first segment. */
	uint16_t len = rte_pktmbuf_tailroom(mbuf);
	struct rte_mbuf *m = mbuf;

	rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
	data_len -= len;
	data += len;

	while (data_len > 0) {
		/* Allocate next mbuf and point to that. */
		m->next = rte_pktmbuf_alloc(mb_pool);

		if (unlikely(!m->next))
			return -1;

		m = m->next;

		/* Headroom is not needed in chained mbufs. */
		rte_pktmbuf_prepend(m, rte_pktmbuf_headroom(m));
		m->pkt_len = 0;
		m->data_len = 0;

		/* Copy next segment. */
		len = RTE_MIN(rte_pktmbuf_tailroom(m), data_len);
		rte_memcpy(rte_pktmbuf_append(m, len), data, len);

		mbuf->nb_segs++;
		data_len -= len;
		data += len;
	}

	return mbuf->nb_segs;
}
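
/*
 * Worked example (illustrative numbers, not from the source): with the
 * default RTE_PKTMBUF_HEADROOM of 128 bytes and a 2176-byte mempool data
 * room, the first segment holds 2048 bytes of capture data. Follow-on
 * segments reclaim the headroom (the prepend above) and hold up to the
 * full 2176 bytes, so a 5000-byte capture chains three mbufs of
 * 2048 + 2176 + 776 bytes and eth_pcap_rx_jumbo() returns nb_segs == 3.
 */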

/* Copy data from mbuf chain to a buffer suitable for writing to a pcap file. */
static void
eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
{
	uint16_t data_len = 0;

	while (mbuf) {
		rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
			mbuf->data_len);

		data_len += mbuf->data_len;
		mbuf = mbuf->next;
	}
}

static uint16_t
eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	struct pcap_pkthdr header;
	struct pmd_process_private *pp;
	const u_char *packet;
	struct rte_mbuf *mbuf;
	struct pcap_rx_queue *pcap_q = queue;
	uint16_t num_rx = 0;
	uint16_t buf_size;
	uint32_t rx_bytes = 0;
	pcap_t *pcap;

	pp = rte_eth_devices[pcap_q->port_id].process_private;
	pcap = pp->rx_pcap[pcap_q->queue_id];

	if (unlikely(pcap == NULL || nb_pkts == 0))
		return 0;

	/* Reads the given number of packets from the pcap file one by one
	 * and copies the packet data into a newly allocated mbuf to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		/* Get the next PCAP packet */
		packet = pcap_next(pcap, &header);
		if (unlikely(packet == NULL))
			break;

		mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;

		/* Now get the space available for data in the mbuf */
		buf_size = rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
				RTE_PKTMBUF_HEADROOM;

		if (header.caplen <= buf_size) {
			/* pcap packet will fit in the mbuf, can copy it */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
					header.caplen);
			mbuf->data_len = (uint16_t)header.caplen;
		} else {
			/* Try to read the jumbo frame into multiple mbufs. */
			if (unlikely(eth_pcap_rx_jumbo(pcap_q->mb_pool,
						       mbuf,
						       packet,
						       header.caplen) == -1)) {
				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		mbuf->pkt_len = (uint16_t)header.caplen;
		mbuf->port = pcap_q->port_id;
		bufs[num_rx] = mbuf;
		num_rx++;
		rx_bytes += header.caplen;
	}
	pcap_q->rx_stat.pkts += num_rx;
	pcap_q->rx_stat.bytes += rx_bytes;

	return num_rx;
}

static inline void
calculate_timestamp(struct timeval *ts) {
	uint64_t cycles;
	struct timeval cur_time;

	cycles = rte_get_timer_cycles() - start_cycles;
	cur_time.tv_sec = cycles / hz;
	cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
	timeradd(&start_time, &cur_time, ts);
}
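
/*
 * Arithmetic example (illustrative numbers): on a 2 GHz timer (hz = 2e9),
 * 5e9 elapsed cycles give tv_sec = 5e9 / 2e9 = 2 and
 * tv_usec = (5e9 % 2e9) * 1e6 / 2e9 = 500000, so the dumped packet is
 * stamped start_time + 2.5 s.
 */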

/*
 * Callback to handle writing packets to a pcap file.
 */
static uint16_t
eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct pmd_process_private *pp;
	struct pcap_tx_queue *dumper_q = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;
	struct pcap_pkthdr header;
	pcap_dumper_t *dumper;

	pp = rte_eth_devices[dumper_q->port_id].process_private;
	dumper = pp->tx_dumper[dumper_q->queue_id];

	if (dumper == NULL || nb_pkts == 0)
		return 0;

	/* writes the nb_pkts packets to the previously opened pcap file
	 * dumper */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];
		calculate_timestamp(&header.ts);
		header.len = mbuf->pkt_len;
		header.caplen = header.len;

		if (likely(mbuf->nb_segs == 1)) {
			pcap_dump((u_char *)dumper, &header,
					rte_pktmbuf_mtod(mbuf, void*));
		} else {
			if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
				eth_pcap_gather_data(tx_pcap_data, mbuf);
				pcap_dump((u_char *)dumper, &header,
						tx_pcap_data);
			} else {
				PMD_LOG(ERR,
					"Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
					mbuf->pkt_len,
					ETHER_MAX_JUMBO_FRAME_LEN);

				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		num_tx++;
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	/*
	 * Since there's no place to hook a callback when the forwarding
	 * process stops and to make sure the pcap file is actually written,
	 * we flush the pcap dumper within each burst.
	 */
	pcap_dump_flush(dumper);
	dumper_q->tx_stat.pkts += num_tx;
	dumper_q->tx_stat.bytes += tx_bytes;
	dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;

	return num_tx;
}

/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned int i;
	int ret;
	struct rte_mbuf *mbuf;
	struct pmd_process_private *pp;
	struct pcap_tx_queue *tx_queue = queue;
	uint16_t num_tx = 0;
	uint32_t tx_bytes = 0;
	pcap_t *pcap;

	pp = rte_eth_devices[tx_queue->port_id].process_private;
	pcap = pp->tx_pcap[tx_queue->queue_id];

	if (unlikely(nb_pkts == 0 || pcap == NULL))
		return 0;

	for (i = 0; i < nb_pkts; i++) {
		mbuf = bufs[i];

		if (likely(mbuf->nb_segs == 1)) {
			ret = pcap_sendpacket(pcap,
					rte_pktmbuf_mtod(mbuf, u_char *),
					mbuf->pkt_len);
		} else {
			if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
				eth_pcap_gather_data(tx_pcap_data, mbuf);
				ret = pcap_sendpacket(pcap,
						tx_pcap_data, mbuf->pkt_len);
			} else {
				PMD_LOG(ERR,
					"Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
					mbuf->pkt_len,
					ETHER_MAX_JUMBO_FRAME_LEN);

				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		if (unlikely(ret != 0))
			break;
		num_tx++;
		tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	tx_queue->tx_stat.pkts += num_tx;
	tx_queue->tx_stat.bytes += tx_bytes;
	tx_queue->tx_stat.err_pkts += nb_pkts - num_tx;

	return num_tx;
}
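
/*
 * Note (added for clarity): the two tx callbacks above are selected in
 * eth_from_pcaps()/pmd_pcap_probe(): eth_pcap_tx_dumper when a tx_pcap
 * file devarg is given, eth_pcap_tx when a tx_iface is given. In both
 * paths err_pkts counts the packets of the burst that were not written
 * or sent.
 */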

/*
 * pcap_open_live wrapper function
 */
static inline int
open_iface_live(const char *iface, pcap_t **pcap) {
	*pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
			RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);

	if (*pcap == NULL) {
		PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
		return -1;
	}

	return 0;
}

static int
open_single_iface(const char *iface, pcap_t **pcap)
{
	if (open_iface_live(iface, pcap) < 0) {
		PMD_LOG(ERR, "Couldn't open interface %s", iface);
		return -1;
	}

	return 0;
}

static int
open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
{
	pcap_t *tx_pcap;

	/*
	 * We need to create a dummy empty pcap_t to use it with
	 * pcap_dump_open(). We create an Ethernet pcap holder that
	 * is big enough.
	 */
	tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN);
	if (tx_pcap == NULL) {
		PMD_LOG(ERR, "Couldn't create dead pcap");
		return -1;
	}

	/* The dumper is created using the previous pcap_t reference */
	*dumper = pcap_dump_open(tx_pcap, pcap_filename);
	if (*dumper == NULL) {
		pcap_close(tx_pcap);
		PMD_LOG(ERR, "Couldn't open %s for writing.",
			pcap_filename);
		return -1;
	}

	pcap_close(tx_pcap);
	return 0;
}
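
/*
 * Note (added for clarity): pcap_open_dead() only fabricates a handle
 * carrying the link type (DLT_EN10MB) and snaplen that pcap_dump_open()
 * needs in order to write the capture file's global header; once the
 * dumper is open, the dead handle can be closed, as done above.
 */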

static int
open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
{
	*pcap = pcap_open_offline(pcap_filename, errbuf);
	if (*pcap == NULL) {
		PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
			errbuf);
		return -1;
	}

	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *pp = dev->process_private;
	struct pcap_tx_queue *tx;
	struct pcap_rx_queue *rx;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		tx = &internals->tx_queue[0];
		rx = &internals->rx_queue[0];

		if (!pp->tx_pcap[0] &&
			strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &pp->tx_pcap[0]) < 0)
				return -1;
			pp->rx_pcap[0] = pp->tx_pcap[0];
		}

		goto status_up;
	}

	/* If not open already, open tx pcaps/dumpers */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		tx = &internals->tx_queue[i];

		if (!pp->tx_dumper[i] &&
				strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
			if (open_single_tx_pcap(tx->name,
				&pp->tx_dumper[i]) < 0)
				return -1;
		} else if (!pp->tx_pcap[i] &&
				strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
			if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0)
				return -1;
		}
	}

	/* If not open already, open rx pcaps */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rx = &internals->rx_queue[i];

		if (pp->rx_pcap[i] != NULL)
			continue;

		if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
			if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0)
				return -1;
		} else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
			if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0)
				return -1;
		}
	}

status_up:
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;

	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

/*
 * This function gets called when the current port gets stopped.
 * It is the only place for us to close all the tx stream dumpers.
 * If not called the dumpers will be flushed within each tx burst.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internals = dev->data->dev_private;
	struct pmd_process_private *pp = dev->process_private;

	/* Special iface case. Single pcap is open and shared between tx/rx. */
	if (internals->single_iface) {
		pcap_close(pp->tx_pcap[0]);
		pp->tx_pcap[0] = NULL;
		pp->rx_pcap[0] = NULL;
		goto status_down;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (pp->tx_dumper[i] != NULL) {
			pcap_dump_close(pp->tx_dumper[i]);
			pp->tx_dumper[i] = NULL;
		}

		if (pp->tx_pcap[i] != NULL) {
			pcap_close(pp->tx_pcap[i]);
			pp->tx_pcap[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (pp->rx_pcap[i] != NULL) {
			pcap_close(pp->rx_pcap[i]);
			pp->rx_pcap[i] = NULL;
		}
	}

status_down:
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t) -1;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	unsigned long rx_packets_total = 0, rx_bytes_total = 0;
	unsigned long tx_packets_total = 0, tx_bytes_total = 0;
	unsigned long tx_packets_err_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
		stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
		rx_packets_total += stats->q_ipackets[i];
		rx_bytes_total += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < dev->data->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
		stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
		stats->q_errors[i] = internal->tx_queue[i].tx_stat.err_pkts;
		tx_packets_total += stats->q_opackets[i];
		tx_bytes_total += stats->q_obytes[i];
		tx_packets_err_total += stats->q_errors[i];
	}

	stats->ipackets = rx_packets_total;
	stats->ibytes = rx_bytes_total;
	stats->opackets = tx_packets_total;
	stats->obytes = tx_bytes_total;
	stats->oerrors = tx_packets_err_total;

	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		internal->rx_queue[i].rx_stat.pkts = 0;
		internal->rx_queue[i].rx_stat.bytes = 0;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_stat.pkts = 0;
		internal->tx_queue[i].tx_stat.bytes = 0;
		internal->tx_queue[i].tx_stat.err_pkts = 0;
	}
}

static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];

	pcap_q->mb_pool = mb_pool;
	pcap_q->port_id = dev->data->port_id;
	pcap_q->queue_id = rx_queue_id;
	dev->data->rx_queues[rx_queue_id] = pcap_q;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];

	pcap_q->port_id = dev->data->port_id;
	pcap_q->queue_id = tx_queue_id;
	dev->data->tx_queues[tx_queue_id] = pcap_q;

	return 0;
}

static int
eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_start = eth_rx_queue_start,
	.tx_queue_start = eth_tx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.tx_queue_stop = eth_tx_queue_stop,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

static int
add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
		pcap_t *pcap, pcap_dumper_t *dumper)
{
	if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
		return -1;
	if (pcap)
		pmd->queue[pmd->num_of_queue].pcap = pcap;
	if (dumper)
		pmd->queue[pmd->num_of_queue].dumper = dumper;
	pmd->queue[pmd->num_of_queue].name = name;
	pmd->queue[pmd->num_of_queue].type = type;
	pmd->num_of_queue++;
	return 0;
}

/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
 */
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
	const char *pcap_filename = value;
	struct pmd_devargs *rx = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
		return -1;

	if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
		pcap_close(pcap);
		return -1;
	}

	return 0;
}

/*
 * Opens a pcap file for writing and stores a reference to it
 * for later use.
 */
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
	const char *pcap_filename = value;
	struct pmd_devargs *dumpers = extra_args;
	pcap_dumper_t *dumper;

	if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
		return -1;

	if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
		pcap_dump_close(dumper);
		return -1;
	}

	return 0;
}

/*
 * Opens an interface for reading and writing
 */
static inline int
open_rx_tx_iface(const char *key, const char *value, void *extra_args)
{
	const char *iface = value;
	struct pmd_devargs *tx = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)
		return -1;

	tx->queue[0].pcap = pcap;
	tx->queue[0].name = iface;
	tx->queue[0].type = key;

	return 0;
}

static inline int
set_iface_direction(const char *iface, pcap_t *pcap,
		pcap_direction_t direction)
{
	const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
	if (pcap_setdirection(pcap, direction) < 0) {
		PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n",
				iface, direction_str, pcap_geterr(pcap));
		return -1;
	}
	PMD_LOG(INFO, "Setting %s pcap direction %s\n",
			iface, direction_str);
	return 0;
}

static inline int
open_iface(const char *key, const char *value, void *extra_args)
{
	const char *iface = value;
	struct pmd_devargs *pmd = extra_args;
	pcap_t *pcap = NULL;

	if (open_single_iface(iface, &pcap) < 0)
		return -1;
	if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
		pcap_close(pcap);
		return -1;
	}

	return 0;
}

/*
 * Opens a NIC for reading packets from it
 */
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
	int ret = open_iface(key, value, extra_args);
	if (ret < 0)
		return ret;
	if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
		struct pmd_devargs *pmd = extra_args;
		unsigned int qid = pmd->num_of_queue - 1;

		set_iface_direction(pmd->queue[qid].name,
				pmd->queue[qid].pcap,
				PCAP_D_IN);
	}

	return 0;
}

static inline int
rx_iface_args_process(const char *key, const char *value, void *extra_args)
{
	if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
			strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
		return open_rx_iface(key, value, extra_args);

	return 0;
}

/*
 * Opens a NIC for writing packets to it
 */
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
	return open_iface(key, value, extra_args);
}

static int
select_phy_mac(const char *key __rte_unused, const char *value,
		void *extra_args)
{
	if (extra_args) {
		const int phy_mac = atoi(value);
		int *enable_phy_mac = extra_args;

		if (phy_mac)
			*enable_phy_mac = 1;
	}
	return 0;
}

static struct rte_vdev_driver pmd_pcap_drv;

static int
pmd_init_internals(struct rte_vdev_device *vdev,
		const unsigned int nb_rx_queues,
		const unsigned int nb_tx_queues,
		struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev)
{
	struct rte_eth_dev_data *data;
	struct pmd_process_private *pp;
	unsigned int numa_node = vdev->device.numa_node;

	PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
		numa_node);

	pp = (struct pmd_process_private *)
		rte_zmalloc(NULL, sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE);

	if (pp == NULL) {
		PMD_LOG(ERR,
			"Failed to allocate memory for process private");
		return -1;
	}

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
	if (!(*eth_dev)) {
		rte_free(pp);
		return -1;
	}
	(*eth_dev)->process_private = pp;
	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	*internals = (*eth_dev)->data->dev_private;
	/*
	 * Interface MAC = 02:70:63:61:70:<iface_idx>
	 * derived from: 'locally administered':'p':'c':'a':'p':<iface_idx>
	 * where the middle 4 characters are converted to hex.
	 */
	(*internals)->eth_addr = (struct ether_addr) {
		.addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ }
	};
	(*internals)->phy_mac = 0;
	data = (*eth_dev)->data;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &(*internals)->eth_addr;

	/*
	 * NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the rings are local per-process
	 */
	(*eth_dev)->dev_ops = &ops;

	strlcpy((*internals)->devargs, rte_vdev_device_args(vdev),
		ETH_PCAP_ARG_MAXLEN);

	return 0;
}

static int
eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev,
		const unsigned int numa_node)
{
#if defined(RTE_EXEC_ENV_LINUX)
	void *mac_addrs;
	struct ifreq ifr;
	int if_fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (if_fd == -1)
		return -1;

	rte_strscpy(ifr.ifr_name, if_name, sizeof(ifr.ifr_name));
	if (ioctl(if_fd, SIOCGIFHWADDR, &ifr)) {
		close(if_fd);
		return -1;
	}

	mac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);
	if (!mac_addrs) {
		close(if_fd);
		return -1;
	}

	PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
	eth_dev->data->mac_addrs = mac_addrs;
	rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
			ifr.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);

	close(if_fd);

	return 0;

#elif defined(RTE_EXEC_ENV_FREEBSD)
	void *mac_addrs;
	struct if_msghdr *ifm;
	struct sockaddr_dl *sdl;
	int mib[6];
	size_t len = 0;
	char *buf;

	mib[0] = CTL_NET;
	mib[1] = AF_ROUTE;
	mib[2] = 0;
	mib[3] = AF_LINK;
	mib[4] = NET_RT_IFLIST;
	mib[5] = if_nametoindex(if_name);

	if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0)
		return -1;

	if (len == 0)
		return -1;

	buf = rte_malloc(NULL, len, 0);
	if (!buf)
		return -1;

	if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) {
		rte_free(buf);
		return -1;
	}
	ifm = (struct if_msghdr *)buf;
	sdl = (struct sockaddr_dl *)(ifm + 1);

	mac_addrs = rte_zmalloc_socket(NULL, ETHER_ADDR_LEN, 0, numa_node);
	if (!mac_addrs) {
		rte_free(buf);
		return -1;
	}

	PMD_LOG(INFO, "Setting phy MAC for %s", if_name);
	eth_dev->data->mac_addrs = mac_addrs;
	rte_memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
			LLADDR(sdl), ETHER_ADDR_LEN);

	rte_free(buf);

	return 0;
#else
	return -1;
#endif
}

static int
eth_from_pcaps_common(struct rte_vdev_device *vdev,
		struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
		struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
		struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
	struct pmd_process_private *pp;
	unsigned int i;

	/* do some parameter checking */
	if (rx_queues == NULL && nb_rx_queues > 0)
		return -1;
	if (tx_queues == NULL && nb_tx_queues > 0)
		return -1;

	if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
			eth_dev) < 0)
		return -1;

	pp = (*eth_dev)->process_private;
	for (i = 0; i < nb_rx_queues; i++) {
		struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
		struct devargs_queue *queue = &rx_queues->queue[i];

		pp->rx_pcap[i] = queue->pcap;
		strlcpy(rx->name, queue->name, sizeof(rx->name));
		strlcpy(rx->type, queue->type, sizeof(rx->type));
	}

	for (i = 0; i < nb_tx_queues; i++) {
		struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
		struct devargs_queue *queue = &tx_queues->queue[i];

		pp->tx_dumper[i] = queue->dumper;
		pp->tx_pcap[i] = queue->pcap;
		strlcpy(tx->name, queue->name, sizeof(tx->name));
		strlcpy(tx->type, queue->type, sizeof(tx->type));
	}

	return 0;
}

static int
eth_from_pcaps(struct rte_vdev_device *vdev,
		struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
		struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
		int single_iface, unsigned int using_dumpers)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	int ret;

	ret = eth_from_pcaps_common(vdev, rx_queues, nb_rx_queues,
		tx_queues, nb_tx_queues, &internals, &eth_dev);

	if (ret < 0)
		return ret;

	/* store whether we are using a single interface for rx/tx or not */
	internals->single_iface = single_iface;

	if (single_iface) {
		internals->if_index = if_nametoindex(rx_queues->queue[0].name);

		/* phy_mac arg is applied only if "iface" devarg is provided */
		if (rx_queues->phy_mac) {
			int ret = eth_pcap_update_mac(rx_queues->queue[0].name,
					eth_dev, vdev->device.numa_node);
			if (ret == 0)
				internals->phy_mac = 1;
		}
	}

	eth_dev->rx_pkt_burst = eth_pcap_rx;

	if (using_dumpers)
		eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
	else
		eth_dev->tx_pkt_burst = eth_pcap_tx;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

static int
pmd_pcap_probe(struct rte_vdev_device *dev)
{
	const char *name;
	unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
	struct rte_kvargs *kvlist;
	struct pmd_devargs pcaps = {0};
	struct pmd_devargs dumpers = {0};
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internal;
	int single_iface = 0;
	int ret;

	name = rte_vdev_device_name(dev);
	PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);

	gettimeofday(&start_time, NULL);
	start_cycles = rte_get_timer_cycles();
	hz = rte_get_timer_hz();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}

		internal = eth_dev->data->dev_private;

		kvlist = rte_kvargs_parse(internal->devargs, valid_arguments);
		if (kvlist == NULL)
			return -1;
	} else {
		kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
				valid_arguments);
		if (kvlist == NULL)
			return -1;
	}

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) {

		ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
				&open_rx_tx_iface, &pcaps);
		if (ret < 0)
			goto free_kvlist;

		dumpers.queue[0] = pcaps.queue[0];

		ret = rte_kvargs_process(kvlist, ETH_PCAP_PHY_MAC_ARG,
				&select_phy_mac, &pcaps.phy_mac);
		if (ret < 0)
			goto free_kvlist;

		dumpers.phy_mac = pcaps.phy_mac;

		single_iface = 1;
		pcaps.num_of_queue = 1;
		dumpers.num_of_queue = 1;

		goto create_eth;
	}

	/*
	 * We check whether we want to open a RX stream from a real NIC or a
	 * pcap file
	 */
	is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
	pcaps.num_of_queue = 0;

	if (is_rx_pcap) {
		ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
				&open_rx_pcap, &pcaps);
	} else {
		ret = rte_kvargs_process(kvlist, NULL,
				&rx_iface_args_process, &pcaps);
	}

	if (ret < 0)
		goto free_kvlist;

	/*
	 * We check whether we want to open a TX stream to a real NIC or a
	 * pcap file
	 */
	is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
	dumpers.num_of_queue = 0;

	if (is_tx_pcap)
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
				&open_tx_pcap, &dumpers);
	else
		ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
				&open_tx_iface, &dumpers);

	if (ret < 0)
		goto free_kvlist;

create_eth:
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		struct pmd_process_private *pp;
		unsigned int i;

		internal = eth_dev->data->dev_private;
		pp = (struct pmd_process_private *)
			rte_zmalloc(NULL,
				sizeof(struct pmd_process_private),
				RTE_CACHE_LINE_SIZE);

		if (pp == NULL) {
			PMD_LOG(ERR,
				"Failed to allocate memory for process private");
			ret = -1;
			goto free_kvlist;
		}

		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;

		/* setup process private */
		for (i = 0; i < pcaps.num_of_queue; i++)
			pp->rx_pcap[i] = pcaps.queue[i].pcap;

		for (i = 0; i < dumpers.num_of_queue; i++) {
			pp->tx_dumper[i] = dumpers.queue[i].dumper;
			pp->tx_pcap[i] = dumpers.queue[i].pcap;
		}

		eth_dev->process_private = pp;
		eth_dev->rx_pkt_burst = eth_pcap_rx;
		if (is_tx_pcap)
			eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
		else
			eth_dev->tx_pkt_burst = eth_pcap_tx;

		rte_eth_dev_probing_finish(eth_dev);
		goto free_kvlist;
	}
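
	/*
	 * Note (added for clarity): the RTE_PROC_SECONDARY branch above
	 * re-runs the kvargs handlers on the devargs saved by the primary,
	 * so the secondary process gets its own pcap_t/pcap_dumper_t
	 * handles; libpcap descriptors cannot be shared across processes,
	 * only the ethdev data in shared memory is.
	 */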

	ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
		dumpers.num_of_queue, single_iface, is_tx_pcap);

free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

static int
pmd_pcap_remove(struct rte_vdev_device *dev)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d",
			rte_socket_id());

	if (!dev)
		return -1;

	/* find the allocated ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		internals = eth_dev->data->dev_private;
		if (internals != NULL && internals->phy_mac == 0)
			/* not dynamically allocated, must not be freed */
			eth_dev->data->mac_addrs = NULL;
	}

	rte_free(eth_dev->process_private);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_pcap_drv = {
	.probe = pmd_pcap_probe,
	.remove = pmd_pcap_remove,
};

RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
	ETH_PCAP_RX_PCAP_ARG "=<string> "
	ETH_PCAP_TX_PCAP_ARG "=<string> "
	ETH_PCAP_RX_IFACE_ARG "=<ifc> "
	ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
	ETH_PCAP_TX_IFACE_ARG "=<ifc> "
	ETH_PCAP_IFACE_ARG "=<ifc> "
	ETH_PCAP_PHY_MAC_ARG "=<int>");
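
/*
 * Usage sketch (illustrative, assuming a DPDK 18.11-era testpmd build):
 * replaying one capture file while recording transmitted traffic to
 * another can be done entirely from the command line, e.g.
 *
 *   testpmd -l 0-1 --no-pci \
 *     --vdev 'net_pcap0,rx_pcap=input.pcap,tx_pcap=output.pcap' \
 *     -- --forward-mode=io
 *
 * The file names and core list are placeholders, not values from this
 * source.
 */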

RTE_INIT(eth_pcap_init_log)
{
	eth_pcap_logtype = rte_log_register("pmd.net.pcap");
	if (eth_pcap_logtype >= 0)
		rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE);
}