/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <sched.h>
#include <unistd.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "list.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ovs-numa.h"
#include "ovs-thread.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "shash.h"
#include "sset.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_mbuf.h"
#include "rte_virtio_net.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * We have to reserve a fair amount of extra space in the mbufs so that the
 * DMA addresses can be aligned to 4KB.
 */

#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)       (MTU_TO_MAX_LEN(mtu) + (512) + \
                              sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

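/* For example, with the default ETHER_MTU of 1500, MTU_TO_MAX_LEN(1500) is
 * 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes of frame data, and
 * MBUF_SIZE() then adds the mbuf metadata, RTE_PKTMBUF_HEADROOM and the
 * 512-byte alignment cushion on top of that. */
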
/* XXX: mempool size should be based on system resources. */
#define NB_MBUF (4096 * 64)
#define MP_CACHE_SZ (256 * 2)
#define SOCKET0 0

#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

/* XXX: Needs per NIC value for these constants. */
#define RX_PTHRESH 32 /* Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default values of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default values of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default values of TX write-back threshold reg. */

#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */

/* Character device cuse_dev_name. */
char *cuse_dev_name = NULL;

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
        .hw_strip_crc   = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4 | ETH_RSS_IPV6
                    | ETH_RSS_IPV4_UDP | ETH_RSS_IPV6_TCP | ETH_RSS_IPV6_UDP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .tx_free_thresh = 0,
    .tx_rs_thresh = 0,
    .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};
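
/* ETH_TXQ_FLAGS_NOMULTSEGS and ETH_TXQ_FLAGS_NOOFFLOADS tell the PMD that
 * this path never transmits chained mbufs and needs no checksum or VLAN
 * offloads, which lets the driver pick its simplest TX burst routine. */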

enum { MAX_RX_QUEUE_LEN = 192 };
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1
};

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non-pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non-pmd thread should hold this mutex while calling them. */
struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;              /* Set to true to flush the queue every
                                 * time packets are queued. */
    int count;
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

/* DPDK has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id; /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
    rte_spinlock_t txq_lock;
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net *netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}

/* XXX: Use DPDK malloc for the entire OVS; in fact, huge pages should be
 * used for all other segments (data, bss and text) as well. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}

/* XXX this function should be called only by pmd threads (or by non-pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}

static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}

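/* Returns the mempool for ('socket_id', 'mtu'), creating it on first use.
 * An existing pool is shared by bumping its refcount; callers release it
 * with dpdk_mp_put(). */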
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d", dmp->mtu,
                 dmp->socket_id) < 0) {
        return NULL;
    }

    dmp->mp = rte_mempool_create(mp_name, NB_MBUF, MBUF_SIZE(mtu),
                                 MP_CACHE_SZ,
                                 sizeof(struct rte_pktmbuf_pool_private),
                                 rte_pktmbuf_pool_init, NULL,
                                 ovs_rte_pktmbuf_init, NULL,
                                 socket_id, 0);

    if (dmp->mp == NULL) {
        return NULL;
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}

static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

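/* Configures 'dev': sets up its RX and TX queues, starts the port, enables
 * promiscuous and allmulticast mode, and records the MAC address and initial
 * link state. */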
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
                                 &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return -diag;
    }

    for (i = 0; i < dev->up.n_txq; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return -diag;
        }
    }

    for (i = 0; i < dev->up.n_rxq; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      dev->socket_id,
                                      &rx_conf, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return -diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;
    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
    return &netdev->up;
}

static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    int i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    /* Each index is considered as a cpu core id, since there should
     * be one tx queue for each cpu core. */
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        /* If the corresponding core is on the same numa node
         * as 'netdev', set 'flush_tx'. */
        netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
    }
}

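/* Initialization common to physical DPDK ('DPDK_DEV_ETH') and vhost
 * ('DPDK_DEV_VHOST') ports; only physical ports get TX queues and NIC setup
 * here. */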
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    /* If 'sid' is negative, it means that the kernel failed
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
    rte_spinlock_init(&netdev->txq_lock);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}

static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, 0, 0); /* string must be null terminated */
    return 0;
}

static int
netdev_dpdk_vhost_construct(struct netdev *netdev_)
{
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}

static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Cannot remove port, vhost device still attached");
        return;
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}

/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try restoring its old configuration;
 * just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    netdev_dpdk_alloc_txq(netdev, n_txq);
    err = dpdk_eth_dev_init(netdev);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}

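/* Drains tx queue 'qid' with repeated rte_eth_tx_burst() calls, freeing
 * (and counting as dropped) whatever the NIC refuses once a burst makes no
 * progress. */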
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += txq->count - nb_tx;
        ovs_mutex_unlock(&dev->mutex);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}

static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}

/*
 * The receive path for the vhost port is the TX path out from guest.
 * Packets are dequeued from the guest's TX virtqueue ('qid' 1 below).
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = 1;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    MAX_PKT_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
    *c = (int) nb_rx;
    return 0;
}

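/* RX from a physical DPDK port.  This core's own TX queue is flushed first
 * so that packets it queued do not linger behind the RX batch. */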
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues. */
    if (rxq_->queue_id == rte_lcore_id()) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             MIN((int)NETDEV_MAX_RX_BATCH,
                                 (int)MAX_RX_QUEUE_LEN));
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}

static void
__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
                         int cnt, bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int tx_pkts, i;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        ovs_mutex_lock(&vhost_dev->mutex);
        vhost_dev->stats.tx_dropped += cnt;
        ovs_mutex_unlock(&vhost_dev->mutex);
        goto out;
    }

    /* The vHost device has a single TX queue, so we need to lock it for TX. */
    rte_spinlock_lock(&vhost_dev->txq_lock);
    tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
                                      (struct rte_mbuf **)pkts, cnt);

    vhost_dev->stats.tx_packets += tx_pkts;
    vhost_dev->stats.tx_dropped += (cnt - tx_pkts);
    rte_spinlock_unlock(&vhost_dev->txq_lock);

out:
    if (may_steal) {
        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}

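/* Copies 'cnt' mbuf pointers into tx queue 'qid', flushing whenever the
 * queue fills (or on every batch when 'flush_tx' is set) and whenever the
 * oldest queued packets have waited more than DRAIN_TSC timer cycles. */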
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}

/* Tx function.  Transmits packets after first copying them into freshly
 * allocated DPDK mbufs. */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[cnt];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non-pmd thread we have to use the mempool mutex, because
     * every non-pmd thread shares the same mempool cache */

    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int)size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += dropped;
        ovs_mutex_unlock(&dev->mutex);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs, newcnt,
                                 true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}

static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
    }
    return 0;
}

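/* Common send path for the physical and ring devices: packets that are not
 * DPDK-allocated (or may not be stolen) are copied into mbufs first; native
 * DPDK packets are queued directly, dropping any that exceed
 * 'max_packet_len'. */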
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);
            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int)size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped += dropped;
            ovs_mutex_unlock(&dev->mutex);
        }
    }
}

static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->rx_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->multicast = UINT64_MAX;
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_length_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_bytes = UINT64_MAX;
    stats->rx_dropped = UINT64_MAX;
    stats->tx_bytes = UINT64_MAX;

    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    stats->tx_dropped = dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}

static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}

static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
    dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
}

/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->up.name, IFNAMSIZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            ovsrcu_set(&netdev->virtio_dev, dev);
            ovs_mutex_unlock(&netdev->mutex);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
                  dev->ifname, dev->device_fh);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' (%ld) has been added",
              dev->ifname, dev->device_fh);
    return 0;
}

/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce after the 'virtio_dev'
             * pointer has been cleared, before the device is removed.
             */
            ovsrcu_synchronize();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' (%ld) has been removed",
              dev->ifname, dev->device_fh);
}

struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration is fully complete.
 */
const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
};

static void *
start_cuse_session_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    rte_vhost_driver_session_start();
    return NULL;
}

static int
dpdk_vhost_class_init(void)
{
    pthread_t thread;
    int err = -1;

    rte_vhost_driver_callback_register(&virtio_net_device_ops);

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    /* start_cuse_session_loop blocks and never enters the OVS RCU quiescent
     * state, so use the raw pthread API rather than ovs_thread_create(). */
    return pthread_create(&thread, NULL, start_cuse_session_loop, NULL);
}

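/* Registers the "netdev-dpdk/set-admin-state" unixctl command and starts the
 * link-state watchdog.  Example use from the shell (hypothetical port name
 * "dpdk0"):
 *   ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 *   ovs-appctl netdev-dpdk/set-admin-state up        # all DPDK ports
 */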
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}

/* Client Rings */

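/* Creates the shared-memory rings "<dev_name>_tx" and "<dev_name>_rx" and
 * wraps them in a DPDK ring ethdev.  The rings are created single-producer/
 * single-consumer; the netdev layer serializes access with 'txq_lock'. */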
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}

static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}

static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* DPDK Rings have a single TX queue, therefore it needs locking. */
    rte_spinlock_lock(&dev->txq_lock);
    netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
    rte_spinlock_unlock(&dev->txq_lock);
    return 0;
}

static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

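/* NETDEV_DPDK_CLASS expands to a 'struct netdev_class' initializer for one
 * DPDK flavour; the upper-case parameters are the per-flavour callbacks and
 * NULL leaves a slot unsupported.  See the dpdk_class, dpdk_ring_class and
 * dpdk_vhost_class definitions below for the three instantiations. */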
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)         \
{                                                            \
    NAME,                                                    \
    INIT,                       /* init */                   \
    NULL,                       /* netdev_dpdk_run */        \
    NULL,                       /* netdev_dpdk_wait */       \
                                                             \
    netdev_dpdk_alloc,                                       \
    CONSTRUCT,                                               \
    DESTRUCT,                                                \
    netdev_dpdk_dealloc,                                     \
    netdev_dpdk_get_config,                                  \
    NULL,                       /* netdev_dpdk_set_config */ \
    NULL,                       /* get_tunnel_config */      \
    NULL,                       /* build header */           \
    NULL,                       /* push header */            \
    NULL,                       /* pop header */             \
    netdev_dpdk_get_numa_id,    /* get_numa_id */            \
    MULTIQ,                     /* set_multiq */             \
                                                             \
    SEND,                       /* send */                   \
    NULL,                       /* send_wait */              \
                                                             \
    netdev_dpdk_set_etheraddr,                               \
    netdev_dpdk_get_etheraddr,                               \
    netdev_dpdk_get_mtu,                                     \
    netdev_dpdk_set_mtu,                                     \
    netdev_dpdk_get_ifindex,                                 \
    GET_CARRIER,                                             \
    netdev_dpdk_get_carrier_resets,                          \
    netdev_dpdk_set_miimon,                                  \
    GET_STATS,                                               \
    GET_FEATURES,                                            \
    NULL,                       /* set_advertisements */     \
                                                             \
    NULL,                       /* set_policing */           \
    NULL,                       /* get_qos_types */          \
    NULL,                       /* get_qos_capabilities */   \
    NULL,                       /* get_qos */                \
    NULL,                       /* set_qos */                \
    NULL,                       /* get_queue */              \
    NULL,                       /* set_queue */              \
    NULL,                       /* delete_queue */           \
    NULL,                       /* get_queue_stats */        \
    NULL,                       /* queue_dump_start */       \
    NULL,                       /* queue_dump_next */        \
    NULL,                       /* queue_dump_done */        \
    NULL,                       /* dump_queue_stats */       \
                                                             \
    NULL,                       /* get_in4 */                \
    NULL,                       /* set_in4 */                \
    NULL,                       /* get_in6 */                \
    NULL,                       /* add_router */             \
    NULL,                       /* get_next_hop */           \
    GET_STATUS,                                              \
    NULL,                       /* arp_lookup */             \
                                                             \
    netdev_dpdk_update_flags,                                \
                                                             \
    netdev_dpdk_rxq_alloc,                                   \
    netdev_dpdk_rxq_construct,                               \
    netdev_dpdk_rxq_destruct,                                \
    netdev_dpdk_rxq_dealloc,                                 \
    RXQ_RECV,                                                \
    NULL,                       /* rx_wait */                \
    NULL,                       /* rxq_drain */              \
}

int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from the arg list. */
    argc--;
    argv++;

    /* If the cuse_dev_name parameter has been provided, set 'cuse_dev_name' to
     * this string if it meets the correct criteria.  Otherwise, set it to the
     * default (vhost-net).
     */
    if (argc >= 3 && !strcmp(argv[1], "--cuse_dev_name") &&
        (strlen(argv[2]) <= NAME_MAX)) {

        cuse_dev_name = strdup(argv[2]);

        /* Remove the cuse_dev_name configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function.
         */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the cuse_dev_name arguments */
        base = 2;

        VLOG_INFO("User-provided cuse_dev_name in use: /dev/%s", cuse_dev_name);
    } else {
        cuse_dev_name = "vhost-net";
        VLOG_INFO("No cuse_dev_name provided - defaulting to /dev/vhost-net");
    }

    /* Keep the program name argument as this is needed for the call to
     * rte_eal_init().
     */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    thread_set_nonpmd();

    return result + 1 + base;
}

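/* A sketch of a typical invocation (illustrative only; EAL flags vary by
 * system):
 *   ovs-vswitchd --dpdk --cuse_dev_name my-vhost -c 0x1 -n 4 \
 *                --socket-mem 1024 -- unix:/var/run/openvswitch/db.sock
 * dpdk_init() consumes "--dpdk", the optional cuse_dev_name pair and the EAL
 * arguments, and returns how many arguments the caller should skip. */
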
const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        NULL,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhost",
        dpdk_vhost_class_init,
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);

void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
        netdev_register_provider(&dpdk_vhost_class);
        ovsthread_once_done(&once);
    }
}

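/* Pins the calling pmd thread to 'cpu' and records 'cpu' as the thread's
 * DPDK lcore id, so that per-lcore resources such as mempool caches resolve
 * correctly. */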
int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non-pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

void
thread_set_nonpmd(void)
{
    /* We have to use NON_PMD_CORE_ID to allow non-pmd threads to perform
     * certain DPDK operations, like rte_eth_dev_configure(). */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}