/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <sched.h>
#include <unistd.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "list.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "odp-util.h"
#include "ofp-print.h"
#include "ovs-numa.h"
#include "ovs-thread.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "shash.h"
#include "sset.h"
#include "unaligned.h"
#include "timeval.h"
#include "unixctl.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_mbuf.h"
#include "rte_virtio_net.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * We need to reserve extra space in the mbufs so that we can align their
 * DMA addresses to 4KB.
 */

#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)       (MTU_TO_MAX_LEN(mtu) + (512) + \
                              sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */

#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ.  This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);

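/* With MAX_NB_MBUF and MIN_NB_MBUF as defined above, the candidate mempool
 * sizes are 262144, 131072, 65536, 32768 and finally 16384 mbufs. */
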
#define SOCKET0              0

#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

/* XXX: Needs per NIC value for these constants. */
#define RX_PTHRESH 32 /* Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default values of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default values of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default values of TX write-back threshold reg. */

#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */

/* Character device cuse_dev_name. */
char *cuse_dev_name = NULL;

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
        .hw_strip_crc   = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4 | ETH_RSS_IPV6
                    | ETH_RSS_IPV4_UDP | ETH_RSS_IPV6_TCP | ETH_RSS_IPV6_UDP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .tx_free_thresh = 0,
    .tx_rs_thresh = 0,
    .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};

enum { MAX_RX_QUEUE_LEN = 192 };
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1
};

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;

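/* A mempool is shared by all the ports with the same MTU on the same NUMA
 * socket; 'refcount' counts how many ports are currently using it. */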
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;              /* Set to true to flush the queue every time
                                 * pkts are queued. */
    int count;
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;  /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
    rte_spinlock_t txq_lock;
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}

/* XXX: use dpdk malloc for entire OVS.  In fact huge pages should be used
 * for all other segments: data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}

/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}

static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after the dp_packet structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}

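/* Returns a mempool for 'mtu' on 'socket_id', reusing an existing one when
 * possible.  The allocation starts at MAX_NB_MBUF mbufs and is halved on
 * ENOMEM until it succeeds or falls below MIN_NB_MBUF. */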
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    } else {
        VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}

static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}

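/* Polls the link state of 'dev' and, if it has changed, updates the cached
 * state, bumps the reset counter and the netdev change sequence. */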
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

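/* Configures and starts the NIC: sets up one rx and one tx queue per
 * configured queue id, each NIC_PORT_RX_Q_SIZE/NIC_PORT_TX_Q_SIZE descriptors
 * deep, then enables promiscuous and allmulticast modes and caches the MAC
 * address and initial link state. */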
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
                                 &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return -diag;
    }

    for (i = 0; i < dev->up.n_txq; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return -diag;
        }
    }

    for (i = 0; i < dev->up.n_rxq; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      dev->socket_id,
                                      &rx_conf, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return -diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;
    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
    return &netdev->up;
}

static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    int i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    /* Each index is considered as a cpu core id, since there should
     * be one tx queue for each cpu core. */
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        /* Set 'flush_tx' if the corresponding core is on the same numa
         * node as 'netdev'. */
        netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
    }
}

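/* Common initialization for physical ("dpdk"/"dpdkr") and vhost ports:
 * chooses the NUMA socket, sets the default MTU and queue counts, and
 * attaches the port to a suitable mempool.  Physical ports are also
 * configured and started via dpdk_eth_dev_init(). */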
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    /* If the 'sid' is negative, it means that the kernel failed
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
    rte_spinlock_init(&netdev->txq_lock);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}

static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, 0, 0); /* string must be null terminated */
    return 0;
}

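/* vhost ports have no physical DPDK port id, so netdev_dpdk_vhost_construct()
 * below passes -1 to netdev_dpdk_init(); the NUMA socket is taken from the
 * master lcore instead of from a PCI device. */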
static int
netdev_dpdk_vhost_construct(struct netdev *netdev_)
{
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}

static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Cannot remove port, vhost device still attached");
        return;
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}

/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try to restore the old configuration;
 * just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    netdev_dpdk_alloc_txq(netdev, n_txq);
    err = dpdk_eth_dev_init(netdev);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}

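/* Transmits everything queued on tx queue 'qid'.  Any mbufs the NIC refuses
 * are freed one at a time (each packet may come from a different mempool)
 * and accounted as tx_dropped. */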
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += txq->count - nb_tx;
        ovs_mutex_unlock(&dev->mutex);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}

static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}

/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = 1;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    MAX_PKT_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
    *c = (int) nb_rx;
    return 0;
}

static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues. */
    if (rxq_->queue_id == rte_lcore_id()) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             MIN((int)NETDEV_MAX_RX_BATCH,
                                 (int)MAX_RX_QUEUE_LEN));
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}

static void
__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
                         int cnt, bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int tx_pkts, i;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        ovs_mutex_lock(&vhost_dev->mutex);
        vhost_dev->stats.tx_dropped += cnt;
        ovs_mutex_unlock(&vhost_dev->mutex);
        goto out;
    }

    /* There is a single vHost TX queue, so we need to lock it for TX. */
    rte_spinlock_lock(&vhost_dev->txq_lock);
    tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
                                      (struct rte_mbuf **)pkts, cnt);

    vhost_dev->stats.tx_packets += tx_pkts;
    vhost_dev->stats.tx_dropped += (cnt - tx_pkts);
    rte_spinlock_unlock(&vhost_dev->txq_lock);

out:
    if (may_steal) {
        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}

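/* Appends 'cnt' mbufs to the burst buffer of tx queue 'qid', flushing
 * whenever the buffer fills up, the queue's 'flush_tx' flag is set, or
 * more than DRAIN_TSC timer cycles have passed since the last flush. */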
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}

/* Tx function.  Copies the packets into DPDK mbufs and transmits them. */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[cnt];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache. */

    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int)size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += dropped;
        ovs_mutex_unlock(&dev->mutex);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs, newcnt,
                                 true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}

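/* vhost send: packets that are not already backed by DPDK mbufs must be
 * copied into mbufs first.  dpdk_do_tx_copy() hands the copies to the vhost
 * path with 'may_steal' set, since it owns them. */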
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
    }
    return 0;
}

static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int)size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped += dropped;
            ovs_mutex_unlock(&dev->mutex);
        }
    }
}

static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    /* Get a mempool sized for the new MTU before switching over. */
    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->rx_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->multicast = UINT64_MAX;
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_length_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_bytes = UINT64_MAX;
    stats->rx_dropped = UINT64_MAX;
    stats->tx_bytes = UINT64_MAX;

    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    stats->tx_dropped = dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}

static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}

static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);

        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
    dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
}

/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->up.name, IFNAMSIZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            ovsrcu_set(&netdev->virtio_dev, dev);
            ovs_mutex_unlock(&netdev->mutex);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
                  dev->ifname, dev->device_fh);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' (%ld) has been added",
              dev->ifname, dev->device_fh);
    return 0;
}

/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce before
             * setting the virtio_dev to NULL.
             */
            ovsrcu_synchronize();
            /*
             * As call to ovsrcu_synchronize() will end the quiescent state,
             * put thread back into quiescent state before returning.
             */
            ovsrcu_quiesce_start();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' (%ld) has been removed",
              dev->ifname, dev->device_fh);
}

struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
};

static void *
start_cuse_session_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the cuse thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}

static int
dpdk_vhost_class_init(void)
{
    int err = -1;

    rte_vhost_driver_callback_register(&virtio_net_device_ops);

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    ovs_thread_create("cuse_thread", start_cuse_session_loop, NULL);
    return 0;
}

static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}

/* Client Rings */

static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue rings. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}

static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings. */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}

static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* DPDK rings have a single TX queue, so they require locking. */
    rte_spinlock_lock(&dev->txq_lock);
    netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
    rte_spinlock_unlock(&dev->txq_lock);
    return 0;
}

static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV) \
{ \
    NAME, \
    INIT,                       /* init */ \
    NULL,                       /* netdev_dpdk_run */ \
    NULL,                       /* netdev_dpdk_wait */ \
 \
    netdev_dpdk_alloc, \
    CONSTRUCT, \
    DESTRUCT, \
    netdev_dpdk_dealloc, \
    netdev_dpdk_get_config, \
    NULL,                       /* netdev_dpdk_set_config */ \
    NULL,                       /* get_tunnel_config */ \
    NULL,                       /* build header */ \
    NULL,                       /* push header */ \
    NULL,                       /* pop header */ \
    netdev_dpdk_get_numa_id,    /* get_numa_id */ \
    MULTIQ,                     /* set_multiq */ \
 \
    SEND,                       /* send */ \
    NULL,                       /* send_wait */ \
 \
    netdev_dpdk_set_etheraddr, \
    netdev_dpdk_get_etheraddr, \
    netdev_dpdk_get_mtu, \
    netdev_dpdk_set_mtu, \
    netdev_dpdk_get_ifindex, \
    GET_CARRIER, \
    netdev_dpdk_get_carrier_resets, \
    netdev_dpdk_set_miimon, \
    GET_STATS, \
    GET_FEATURES, \
    NULL,                       /* set_advertisements */ \
 \
    NULL,                       /* set_policing */ \
    NULL,                       /* get_qos_types */ \
    NULL,                       /* get_qos_capabilities */ \
    NULL,                       /* get_qos */ \
    NULL,                       /* set_qos */ \
    NULL,                       /* get_queue */ \
    NULL,                       /* set_queue */ \
    NULL,                       /* delete_queue */ \
    NULL,                       /* get_queue_stats */ \
    NULL,                       /* queue_dump_start */ \
    NULL,                       /* queue_dump_next */ \
    NULL,                       /* queue_dump_done */ \
    NULL,                       /* dump_queue_stats */ \
 \
    NULL,                       /* get_in4 */ \
    NULL,                       /* set_in4 */ \
    NULL,                       /* get_in6 */ \
    NULL,                       /* add_router */ \
    NULL,                       /* get_next_hop */ \
    GET_STATUS, \
    NULL,                       /* arp_lookup */ \
 \
    netdev_dpdk_update_flags, \
 \
    netdev_dpdk_rxq_alloc, \
    netdev_dpdk_rxq_construct, \
    netdev_dpdk_rxq_destruct, \
    netdev_dpdk_rxq_dealloc, \
    RXQ_RECV, \
    NULL,                       /* rx_wait */ \
    NULL,                       /* rxq_drain */ \
}

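/* Parses the --dpdk section of the vswitchd command line, records the
 * optional cuse device name, and hands the remaining arguments to
 * rte_eal_init().  Returns the number of arguments consumed, so that the
 * caller can skip past them. */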
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from the arg list. */
    argc--;
    argv++;

    /* If the cuse_dev_name parameter has been provided, set 'cuse_dev_name' to
     * this string if it meets the correct criteria.  Otherwise, set it to the
     * default (vhost-net).
     */
    if (!strcmp(argv[1], "--cuse_dev_name") &&
        (strlen(argv[2]) <= NAME_MAX)) {

        cuse_dev_name = strdup(argv[2]);

        /* Remove the cuse_dev_name configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function.
         */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the cuse_dev_name arguments */
        base = 2;

        VLOG_INFO("User-provided cuse_dev_name in use: /dev/%s", cuse_dev_name);
    } else {
        cuse_dev_name = "vhost-net";
        VLOG_INFO("No cuse_dev_name provided - defaulting to /dev/vhost-net");
    }

    /* Keep the program name argument as this is needed for the call to
     * rte_eal_init().
     */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    thread_set_nonpmd();

    return result + 1 + base;
}

const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        NULL,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhost",
        dpdk_vhost_class_init,
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);

void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
        netdev_register_provider(&dpdk_vhost_class);
        ovsthread_once_done(&once);
    }
}

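/* Pins the calling pmd thread to 'cpu' and records the core id in DPDK's
 * per-lcore variable, so that rte_lcore_id() identifies this thread as a
 * pmd thread from now on. */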
int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

void
thread_set_nonpmd(void)
{
    /* We have to use NON_PMD_CORE_ID to allow non-pmd threads to perform
     * certain DPDK operations, like rte_eth_dev_configure(). */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}