/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014 John W. Linville <linville@tuxdriver.com>
 * Originally based upon librte_pmd_pcap code:
 * Copyright(c) 2010-2015 Intel Corporation.
 * Copyright(c) 2014 6WIND S.A.
 * All rights reserved.
 */

#include <rte_string_fns.h>
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>

#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <poll.h>

#define ETH_AF_PACKET_IFACE_ARG		"iface"
#define ETH_AF_PACKET_NUM_Q_ARG		"qpairs"
#define ETH_AF_PACKET_BLOCKSIZE_ARG	"blocksz"
#define ETH_AF_PACKET_FRAMESIZE_ARG	"framesz"
#define ETH_AF_PACKET_FRAMECOUNT_ARG	"framecnt"
#define ETH_AF_PACKET_QDISC_BYPASS_ARG	"qdisc_bypass"

#define DFLT_BLOCK_SIZE		(1 << 12)
#define DFLT_FRAME_SIZE		(1 << 11)
#define DFLT_FRAME_COUNT	(1 << 9)

#define RTE_PMD_AF_PACKET_MAX_RINGS 16

struct pkt_rx_queue {
	int sockfd;

	struct iovec *rd;
	uint8_t *map;
	unsigned int framecount;
	unsigned int framenum;

	struct rte_mempool *mb_pool;
	uint16_t in_port;

	volatile unsigned long rx_pkts;
	volatile unsigned long err_pkts;
	volatile unsigned long rx_bytes;
};

struct pkt_tx_queue {
	int sockfd;
	unsigned int frame_data_size;

	struct iovec *rd;
	uint8_t *map;
	unsigned int framecount;
	unsigned int framenum;

	volatile unsigned long tx_pkts;
	volatile unsigned long err_pkts;
	volatile unsigned long tx_bytes;
};

struct pmd_internals {
	unsigned nb_queues;

	int if_index;
	char *if_name;
	struct ether_addr eth_addr;

	struct tpacket_req req;

	struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
	struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
};

static const char *valid_arguments[] = {
	ETH_AF_PACKET_IFACE_ARG,
	ETH_AF_PACKET_NUM_Q_ARG,
	ETH_AF_PACKET_BLOCKSIZE_ARG,
	ETH_AF_PACKET_FRAMESIZE_ARG,
	ETH_AF_PACKET_FRAMECOUNT_ARG,
	ETH_AF_PACKET_QDISC_BYPASS_ARG,
	NULL
};

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

static int af_packet_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, af_packet_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

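/*
 * Rx burst callback: drain up to nb_pkts frames from the queue's
 * mmap'ed TPACKET_V2 ring, copying each frame into a newly allocated
 * mbuf before handing the ring slot back to the kernel.
 */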
static uint16_t
eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned i;
	struct tpacket2_hdr *ppd;
	struct rte_mbuf *mbuf;
	uint8_t *pbuf;
	struct pkt_rx_queue *pkt_q = queue;
	uint16_t num_rx = 0;
	unsigned long num_rx_bytes = 0;
	unsigned int framecount, framenum;

	if (unlikely(nb_pkts == 0))
		return 0;

	/*
	 * Reads the given number of packets from the AF_PACKET socket one by
	 * one and copies the packet data into a newly allocated mbuf.
	 */
	framecount = pkt_q->framecount;
	framenum = pkt_q->framenum;
	for (i = 0; i < nb_pkts; i++) {
		/* point at the next incoming frame */
		ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
		if ((ppd->tp_status & TP_STATUS_USER) == 0)
			break;

		/* allocate the next mbuf */
		mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;

		/* packet will fit in the mbuf, go ahead and receive it */
		rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen;
		pbuf = (uint8_t *) ppd + ppd->tp_mac;
		memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));

		/* check for vlan info */
		if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
			mbuf->vlan_tci = ppd->tp_vlan_tci;
			mbuf->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
		}

		/* release incoming frame and advance ring buffer */
		ppd->tp_status = TP_STATUS_KERNEL;
		if (++framenum >= framecount)
			framenum = 0;
		mbuf->port = pkt_q->in_port;

		/* account for the receive frame */
		bufs[i] = mbuf;
		num_rx++;
		num_rx_bytes += mbuf->pkt_len;
	}
	pkt_q->framenum = framenum;
	pkt_q->rx_pkts += num_rx;
	pkt_q->rx_bytes += num_rx_bytes;
	return num_rx;
}

/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tpacket2_hdr *ppd;
	struct rte_mbuf *mbuf;
	uint8_t *pbuf;
	unsigned int framecount, framenum;
	struct pollfd pfd;
	struct pkt_tx_queue *pkt_q = queue;
	uint16_t num_tx = 0;
	unsigned long num_tx_bytes = 0;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;

	memset(&pfd, 0, sizeof(pfd));
	pfd.fd = pkt_q->sockfd;
	pfd.events = POLLOUT;
	pfd.revents = 0;

	framecount = pkt_q->framecount;
	framenum = pkt_q->framenum;
	ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
	for (i = 0; i < nb_pkts; i++) {
		mbuf = *bufs++;

		/* drop oversized packets */
		if (mbuf->pkt_len > pkt_q->frame_data_size) {
			rte_pktmbuf_free(mbuf);
			continue;
		}

		/* insert vlan info if necessary */
		if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			if (rte_vlan_insert(&mbuf)) {
				rte_pktmbuf_free(mbuf);
				continue;
			}
		}

		/* wait for the next Tx ring slot to become available */
		if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
		    (poll(&pfd, 1, -1) < 0))
			break;

		/* copy the tx frame data */
		pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
			sizeof(struct sockaddr_ll);

		struct rte_mbuf *tmp_mbuf = mbuf;
		while (tmp_mbuf) {
			uint16_t data_len = rte_pktmbuf_data_len(tmp_mbuf);
			memcpy(pbuf, rte_pktmbuf_mtod(tmp_mbuf, void*), data_len);
			pbuf += data_len;
			tmp_mbuf = tmp_mbuf->next;
		}

		ppd->tp_len = mbuf->pkt_len;
		ppd->tp_snaplen = mbuf->pkt_len;

		/* hand the frame over to the kernel and advance the ring */
		ppd->tp_status = TP_STATUS_SEND_REQUEST;
		if (++framenum >= framecount)
			framenum = 0;
		ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;

		num_tx++;
		num_tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	/* kick-off transmits */
	if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1) {
		/* error sending -- no packets transmitted */
		num_tx = 0;
		num_tx_bytes = 0;
	}

	pkt_q->framenum = framenum;
	pkt_q->tx_pkts += num_tx;
	pkt_q->err_pkts += i - num_tx;
	pkt_q->tx_bytes += num_tx_bytes;
	return i;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

/*
 * This function gets called when the current port gets stopped.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned i;
	int sockfd;
	struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < internals->nb_queues; i++) {
		sockfd = internals->rx_queue[i].sockfd;
		if (sockfd != -1)
			close(sockfd);

		/* Prevent use after free in case tx fd == rx fd */
		if (sockfd != internals->tx_queue[i].sockfd) {
			sockfd = internals->tx_queue[i].sockfd;
			if (sockfd != -1)
				close(sockfd);
		}

		internals->rx_queue[i].sockfd = -1;
		internals->tx_queue[i].sockfd = -1;
	}

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
	dev_info->min_rx_bufsize = 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
	        internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < imax; i++) {
		igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
		igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
		rx_total += igb_stats->q_ipackets[i];
		rx_bytes_total += igb_stats->q_ibytes[i];
	}

	imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
	        internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < imax; i++) {
		igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
		igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
		igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
		tx_bytes_total += igb_stats->q_obytes[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->ibytes = rx_bytes_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
	igb_stats->obytes = tx_bytes_total;
	return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < internal->nb_queues; i++) {
		internal->rx_queue[i].rx_pkts = 0;
		internal->rx_queue[i].rx_bytes = 0;
	}

	for (i = 0; i < internal->nb_queues; i++) {
		internal->tx_queue[i].tx_pkts = 0;
		internal->tx_queue[i].err_pkts = 0;
		internal->tx_queue[i].tx_bytes = 0;
	}
}

static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id __rte_unused,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
	unsigned int buf_size, data_size;

	pkt_q->mb_pool = mb_pool;

	/* Now get the space available for data in the mbuf */
	buf_size = rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
		RTE_PKTMBUF_HEADROOM;
	data_size = internals->req.tp_frame_size;
	data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);

	if (data_size > buf_size) {
		PMD_LOG(ERR,
			"%s: %d bytes will not fit in mbuf (%d bytes)",
			dev->device->name, data_size, buf_size);
		return -ENOMEM;
	}

	dev->data->rx_queues[rx_queue_id] = pkt_q;
	pkt_q->in_port = dev->data->port_id;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id __rte_unused,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{

	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
	return 0;
}

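/*
 * Set the MTU of the underlying kernel interface (SIOCSIFMTU), after
 * checking that the new MTU still fits in a TPACKET_V2 ring frame.
 */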
static int
eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct ifreq ifr = { .ifr_mtu = mtu };
	int ret;
	int s;
	unsigned int data_size = internals->req.tp_frame_size -
				 TPACKET2_HDRLEN;

	if (mtu > data_size)
		return -EINVAL;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -EINVAL;

	strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
	ret = ioctl(s, SIOCSIFMTU, &ifr);
	close(s);

	if (ret < 0)
		return -EINVAL;

	return 0;
}

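/*
 * Helper used by the promiscuous-mode callbacks: read the interface
 * flags with SIOCGIFFLAGS, apply the given mask and flags, and write
 * them back with SIOCSIFFLAGS.
 */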
static void
eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
{
	struct ifreq ifr;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0)
		goto out;
	ifr.ifr_flags &= mask;
	ifr.ifr_flags |= flags;
	if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0)
		goto out;
out:
	close(s);
}

static void
eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
}

static void
eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;

	eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.mtu_set = eth_dev_mtu_set,
	.promiscuous_enable = eth_dev_promiscuous_enable,
	.promiscuous_disable = eth_dev_promiscuous_disable,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

/*
 * Opens an AF_PACKET socket
 */
static int
open_packet_iface(const char *key __rte_unused,
                  const char *value __rte_unused,
                  void *extra_args)
{
	int *sockfd = extra_args;

	/* Open an AF_PACKET socket... */
	*sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (*sockfd == -1) {
		PMD_LOG(ERR, "Could not open AF_PACKET socket");
		return -1;
	}

	return 0;
}

static struct rte_vdev_driver pmd_af_packet_drv;

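/*
 * Allocate the per-device internals, open one AF_PACKET socket per queue
 * pair, set up the TPACKET_V2 Rx/Tx rings (PACKET_RX_RING/PACKET_TX_RING),
 * mmap them, bind to the requested interface and create the ethdev entry.
 */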
static int
rte_pmd_init_internals(struct rte_vdev_device *dev,
                       const int sockfd,
                       const unsigned nb_queues,
                       unsigned int blocksize,
                       unsigned int blockcnt,
                       unsigned int framesize,
                       unsigned int framecnt,
                       unsigned int qdisc_bypass,
                       struct pmd_internals **internals,
                       struct rte_eth_dev **eth_dev,
                       struct rte_kvargs *kvlist)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct rte_eth_dev_data *data = NULL;
	struct rte_kvargs_pair *pair = NULL;
	struct ifreq ifr;
	size_t ifnamelen;
	unsigned k_idx;
	struct sockaddr_ll sockaddr;
	struct tpacket_req *req;
	struct pkt_rx_queue *rx_queue;
	struct pkt_tx_queue *tx_queue;
	int rc, tpver, discard;
	int qsockfd = -1;
	unsigned int i, q, rdsize;
#if defined(PACKET_FANOUT)
	int fanout_arg;
#endif

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL)
			break;
	}
	if (pair == NULL) {
		PMD_LOG(ERR,
			"%s: no interface specified for AF_PACKET ethdev",
			name);
		return -1;
	}

	PMD_LOG(INFO,
		"%s: creating AF_PACKET-backed ethdev on numa socket %u",
		name, numa_node);

	*internals = rte_zmalloc_socket(name, sizeof(**internals),
	                                0, numa_node);
	if (*internals == NULL)
		return -1;

	for (q = 0; q < nb_queues; q++) {
		(*internals)->rx_queue[q].map = MAP_FAILED;
		(*internals)->tx_queue[q].map = MAP_FAILED;
	}

	req = &((*internals)->req);

	req->tp_block_size = blocksize;
	req->tp_block_nr = blockcnt;
	req->tp_frame_size = framesize;
	req->tp_frame_nr = framecnt;

	ifnamelen = strlen(pair->value);
	if (ifnamelen < sizeof(ifr.ifr_name)) {
		memcpy(ifr.ifr_name, pair->value, ifnamelen);
		ifr.ifr_name[ifnamelen] = '\0';
	} else {
		PMD_LOG(ERR,
			"%s: I/F name too long (%s)",
			name, pair->value);
		return -1;
	}
	if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
		PMD_LOG(ERR,
			"%s: ioctl failed (SIOCGIFINDEX)",
			name);
		return -1;
	}
	(*internals)->if_name = strdup(pair->value);
	if ((*internals)->if_name == NULL)
		return -1;
	(*internals)->if_index = ifr.ifr_ifindex;

	if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
		PMD_LOG(ERR,
			"%s: ioctl failed (SIOCGIFHWADDR)",
			name);
		return -1;
	}
	memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);

	memset(&sockaddr, 0, sizeof(sockaddr));
	sockaddr.sll_family = AF_PACKET;
	sockaddr.sll_protocol = htons(ETH_P_ALL);
	sockaddr.sll_ifindex = (*internals)->if_index;

#if defined(PACKET_FANOUT)
	fanout_arg = (getpid() ^ (*internals)->if_index) & 0xffff;
	fanout_arg |= (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16;
#if defined(PACKET_FANOUT_FLAG_ROLLOVER)
	fanout_arg |= PACKET_FANOUT_FLAG_ROLLOVER << 16;
#endif
#endif

	for (q = 0; q < nb_queues; q++) {
		/* Open an AF_PACKET socket for this queue... */
		qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		if (qsockfd == -1) {
			PMD_LOG(ERR,
				"%s: could not open AF_PACKET socket",
				name);
			return -1;
		}

		tpver = TPACKET_V2;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION,
				&tpver, sizeof(tpver));
		if (rc == -1) {
			PMD_LOG(ERR,
				"%s: could not set PACKET_VERSION on AF_PACKET socket for %s",
				name, pair->value);
			goto error;
		}

		discard = 1;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS,
				&discard, sizeof(discard));
		if (rc == -1) {
			PMD_LOG(ERR,
				"%s: could not set PACKET_LOSS on AF_PACKET socket for %s",
				name, pair->value);
			goto error;
		}

#if defined(PACKET_QDISC_BYPASS)
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
				&qdisc_bypass, sizeof(qdisc_bypass));
		if (rc == -1) {
			PMD_LOG(ERR,
				"%s: could not set PACKET_QDISC_BYPASS on AF_PACKET socket for %s",
				name, pair->value);
			goto error;
		}
#else
		RTE_SET_USED(qdisc_bypass);
#endif

		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
		if (rc == -1) {
			PMD_LOG(ERR,
				"%s: could not set PACKET_RX_RING on AF_PACKET socket for %s",
				name, pair->value);
			goto error;
		}

		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req));
		if (rc == -1) {
			PMD_LOG(ERR,
				"%s: could not set PACKET_TX_RING on AF_PACKET "
				"socket for %s", name, pair->value);
			goto error;
		}

		rx_queue = &((*internals)->rx_queue[q]);
		rx_queue->framecount = req->tp_frame_nr;

		rx_queue->map = mmap(NULL, 2 * req->tp_block_size * req->tp_block_nr,
				    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
				    qsockfd, 0);
		if (rx_queue->map == MAP_FAILED) {
			PMD_LOG(ERR,
				"%s: call to mmap failed on AF_PACKET socket for %s",
				name, pair->value);
			goto error;
		}

		/* rdsize is same for both Tx and Rx */
		rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd));

		rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
		if (rx_queue->rd == NULL)
			goto error;
		for (i = 0; i < req->tp_frame_nr; ++i) {
			rx_queue->rd[i].iov_base = rx_queue->map + (i * framesize);
			rx_queue->rd[i].iov_len = req->tp_frame_size;
		}
		rx_queue->sockfd = qsockfd;

		tx_queue = &((*internals)->tx_queue[q]);
		tx_queue->framecount = req->tp_frame_nr;
		tx_queue->frame_data_size = req->tp_frame_size;
		tx_queue->frame_data_size -= TPACKET2_HDRLEN -
			sizeof(struct sockaddr_ll);

		tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr;

		tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
		if (tx_queue->rd == NULL)
			goto error;
		for (i = 0; i < req->tp_frame_nr; ++i) {
			tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize);
			tx_queue->rd[i].iov_len = req->tp_frame_size;
		}
		tx_queue->sockfd = qsockfd;

		rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
		if (rc == -1) {
			PMD_LOG(ERR,
				"%s: could not bind AF_PACKET socket to %s",
				name, pair->value);
			goto error;
		}

#if defined(PACKET_FANOUT)
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT,
				&fanout_arg, sizeof(fanout_arg));
		if (rc == -1) {
			PMD_LOG(ERR,
				"%s: could not set PACKET_FANOUT on AF_PACKET socket "
				"for %s", name, pair->value);
			goto error;
		}
#endif
	}

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (*eth_dev == NULL)
		goto error;

	/*
	 * now put it all together
	 * - store queue data in internals,
	 * - store numa_node in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	(*internals)->nb_queues = nb_queues;

	data = (*eth_dev)->data;
	data->dev_private = *internals;
	data->nb_rx_queues = (uint16_t)nb_queues;
	data->nb_tx_queues = (uint16_t)nb_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &(*internals)->eth_addr;

	(*eth_dev)->dev_ops = &ops;

	return 0;

error:
	if (qsockfd != -1)
		close(qsockfd);
	for (q = 0; q < nb_queues; q++) {
		munmap((*internals)->rx_queue[q].map,
		       2 * req->tp_block_size * req->tp_block_nr);

		rte_free((*internals)->rx_queue[q].rd);
		rte_free((*internals)->tx_queue[q].rd);
		if (((*internals)->rx_queue[q].sockfd != 0) &&
		    ((*internals)->rx_queue[q].sockfd != qsockfd))
			close((*internals)->rx_queue[q].sockfd);
	}
	free((*internals)->if_name);
	rte_free(*internals);
	return -1;
}

static int
rte_eth_from_packet(struct rte_vdev_device *dev,
                    int const *sockfd,
                    struct rte_kvargs *kvlist)
{
	const char *name = rte_vdev_device_name(dev);
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_kvargs_pair *pair = NULL;
	unsigned k_idx;
	unsigned int blockcount;
	unsigned int blocksize = DFLT_BLOCK_SIZE;
	unsigned int framesize = DFLT_FRAME_SIZE;
	unsigned int framecount = DFLT_FRAME_COUNT;
	unsigned int qpairs = 1;
	unsigned int qdisc_bypass = 1;

	/* do some parameter checking */
	if (*sockfd < 0)
		return -1;

	/*
	 * Walk arguments for configurable settings
	 */
	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) {
			qpairs = atoi(pair->value);
			if (qpairs < 1 ||
			    qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
				PMD_LOG(ERR,
					"%s: invalid qpairs value",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
			blocksize = atoi(pair->value);
			if (!blocksize) {
				PMD_LOG(ERR,
					"%s: invalid blocksize value",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
			framesize = atoi(pair->value);
			if (!framesize) {
				PMD_LOG(ERR,
					"%s: invalid framesize value",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
			framecount = atoi(pair->value);
			if (!framecount) {
				PMD_LOG(ERR,
					"%s: invalid framecount value",
					name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) {
			qdisc_bypass = atoi(pair->value);
			if (qdisc_bypass > 1) {
				PMD_LOG(ERR,
					"%s: invalid bypass value",
					name);
				return -1;
			}
			continue;
		}
	}

	if (framesize > blocksize) {
		PMD_LOG(ERR,
			"%s: AF_PACKET MMAP frame size exceeds block size!",
			name);
		return -1;
	}

	blockcount = framecount / (blocksize / framesize);
	if (!blockcount) {
		PMD_LOG(ERR,
			"%s: invalid AF_PACKET MMAP parameters", name);
		return -1;
	}

	PMD_LOG(INFO, "%s: AF_PACKET MMAP parameters:", name);
	PMD_LOG(INFO, "%s:\tblock size %d", name, blocksize);
	PMD_LOG(INFO, "%s:\tblock count %d", name, blockcount);
	PMD_LOG(INFO, "%s:\tframe size %d", name, framesize);
	PMD_LOG(INFO, "%s:\tframe count %d", name, framecount);

	if (rte_pmd_init_internals(dev, *sockfd, qpairs,
	                           blocksize, blockcount,
	                           framesize, framecount,
	                           qdisc_bypass,
	                           &internals, &eth_dev,
	                           kvlist) < 0)
		return -1;

	eth_dev->rx_pkt_burst = eth_af_packet_rx;
	eth_dev->tx_pkt_burst = eth_af_packet_tx;

	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}

static int
rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
{
	int ret = 0;
	struct rte_kvargs *kvlist;
	int sockfd = -1;
	struct rte_eth_dev *eth_dev;
	const char *name = rte_vdev_device_name(dev);

	PMD_LOG(INFO, "Initializing pmd_af_packet for %s", name);

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		eth_dev = rte_eth_dev_attach_secondary(name);
		if (!eth_dev) {
			PMD_LOG(ERR, "Failed to probe %s", name);
			return -1;
		}
		/* TODO: request info from primary to set up Rx and Tx */
		eth_dev->dev_ops = &ops;
		eth_dev->device = &dev->device;
		rte_eth_dev_probing_finish(eth_dev);
		return 0;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		ret = -1;
		goto exit;
	}

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) {

		ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG,
		                         &open_packet_iface, &sockfd);
		if (ret < 0)
			goto exit;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	ret = rte_eth_from_packet(dev, &sockfd, kvlist);
	close(sockfd); /* no longer needed */

exit:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	unsigned q;

	PMD_LOG(INFO, "Closing AF_PACKET ethdev on numa socket %u",
		rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	/* mac_addrs must not be freed alone because part of dev_private */
	eth_dev->data->mac_addrs = NULL;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return rte_eth_dev_release_port(eth_dev);

	internals = eth_dev->data->dev_private;
	for (q = 0; q < internals->nb_queues; q++) {
		rte_free(internals->rx_queue[q].rd);
		rte_free(internals->tx_queue[q].rd);
	}
	free(internals->if_name);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_af_packet_drv = {
	.probe = rte_pmd_af_packet_probe,
	.remove = rte_pmd_af_packet_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_packet, pmd_af_packet_drv);
RTE_PMD_REGISTER_ALIAS(net_af_packet, eth_af_packet);
RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
	"iface=<string> "
	"qpairs=<int> "
	"blocksz=<int> "
	"framesz=<int> "
	"framecnt=<int> "
	"qdisc_bypass=<0|1>");

RTE_INIT(af_packet_init_log)
{
	af_packet_logtype = rte_log_register("pmd.net.packet");
	if (af_packet_logtype >= 0)
		rte_log_set_level(af_packet_logtype, RTE_LOG_NOTICE);
}