]> git.proxmox.com Git - ovs.git/blob - lib/netdev-dpdk.c
netdev-dpdk: Restore txq/rxq number if initialization fails.
[ovs.git] / lib / netdev-dpdk.c
1 /*
2 * Copyright (c) 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include <string.h>
20 #include <signal.h>
21 #include <stdlib.h>
22 #include <pthread.h>
23 #include <config.h>
24 #include <errno.h>
25 #include <sched.h>
26 #include <stdlib.h>
27 #include <unistd.h>
28 #include <sys/stat.h>
29 #include <stdio.h>
30 #include <sys/types.h>
31 #include <sys/stat.h>
32
33 #include "dirs.h"
34 #include "dp-packet.h"
35 #include "dpif-netdev.h"
36 #include "list.h"
37 #include "netdev-dpdk.h"
38 #include "netdev-provider.h"
39 #include "netdev-vport.h"
40 #include "odp-util.h"
41 #include "ofp-print.h"
42 #include "ovs-numa.h"
43 #include "ovs-thread.h"
44 #include "ovs-rcu.h"
45 #include "packets.h"
46 #include "shash.h"
47 #include "sset.h"
48 #include "unaligned.h"
49 #include "timeval.h"
50 #include "unixctl.h"
51 #include "openvswitch/vlog.h"
52
53 #include "rte_config.h"
54 #include "rte_mbuf.h"
55 #include "rte_virtio_net.h"
56
57 VLOG_DEFINE_THIS_MODULE(dpdk);
58 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
59
60 #define DPDK_PORT_WATCHDOG_INTERVAL 5
61
62 #define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
63 #define OVS_VPORT_DPDK "ovs_dpdk"
64
65 /*
66 * need to reserve tons of extra space in the mbufs so we can align the
67 * DMA addresses to 4KB.
68 */
69
70 #define MTU_TO_MAX_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
71 #define MBUF_SIZE(mtu) (MTU_TO_MAX_LEN(mtu) + (512) + \
72 sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
73
74 /* Max and min number of packets in the mempool. OVS tries to allocate a
75 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
76 * enough hugepages) we keep halving the number until the allocation succeeds
77 * or we reach MIN_NB_MBUF */
78
79 #define MAX_NB_MBUF (4096 * 64)
80 #define MIN_NB_MBUF (4096 * 4)
81 #define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE
82
83 /* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
84 BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);
85
86 /* The smallest possible NB_MBUF that we're going to try should be a multiple
87 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
88 BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
89 % MP_CACHE_SZ == 0);
90
91 #define SOCKET0 0
92
93 #define NIC_PORT_RX_Q_SIZE 2048 /* Size of Physical NIC RX Queue, Max (n+32<=4096)*/
94 #define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue, Max (n+32<=4096)*/
95
96 static char *cuse_dev_name = NULL; /* Character device cuse_dev_name. */
97 static char *vhost_sock_dir = NULL; /* Location of vhost-user sockets */
98
99 /*
100 * Maximum amount of time in micro seconds to try and enqueue to vhost.
101 */
102 #define VHOST_ENQ_RETRY_USECS 100
103
104 static const struct rte_eth_conf port_conf = {
105 .rxmode = {
106 .mq_mode = ETH_MQ_RX_RSS,
107 .split_hdr_size = 0,
108 .header_split = 0, /* Header Split disabled */
109 .hw_ip_checksum = 0, /* IP checksum offload disabled */
110 .hw_vlan_filter = 0, /* VLAN filtering disabled */
111 .jumbo_frame = 0, /* Jumbo Frame Support disabled */
112 .hw_strip_crc = 0,
113 },
114 .rx_adv_conf = {
115 .rss_conf = {
116 .rss_key = NULL,
117 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
118 },
119 },
120 .txmode = {
121 .mq_mode = ETH_MQ_TX_NONE,
122 },
123 };
124
125 enum { MAX_TX_QUEUE_LEN = 384 };
126 enum { DPDK_RING_SIZE = 256 };
127 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
128 enum { DRAIN_TSC = 200000ULL };
129
130 enum dpdk_dev_type {
131 DPDK_DEV_ETH = 0,
132 DPDK_DEV_VHOST = 1,
133 };
134
135 static int rte_eal_init_ret = ENODEV;
136
137 static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
138
139 /* Contains all 'struct dpdk_dev's. */
140 static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
141 = OVS_LIST_INITIALIZER(&dpdk_list);
142
143 static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
144 = OVS_LIST_INITIALIZER(&dpdk_mp_list);
145
146 /* This mutex must be used by non pmd threads when allocating or freeing
147 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
148 * use mempools, a non pmd thread should hold this mutex while calling them */
149 static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
150
151 struct dpdk_mp {
152 struct rte_mempool *mp;
153 int mtu;
154 int socket_id;
155 int refcount;
156 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
157 };
158
159 /* There should be one 'struct dpdk_tx_queue' created for
160 * each cpu core. */
161 struct dpdk_tx_queue {
162 bool flush_tx; /* Set to true to flush queue everytime */
163 /* pkts are queued. */
164 int count;
165 rte_spinlock_t tx_lock; /* Protects the members and the NIC queue
166 * from concurrent access. It is used only
167 * if the queue is shared among different
168 * pmd threads (see 'txq_needs_locking'). */
169 uint64_t tsc;
170 struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
171 };
172
173 /* dpdk has no way to remove dpdk ring ethernet devices
174 so we have to keep them around once they've been created
175 */
176
177 static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
178 = OVS_LIST_INITIALIZER(&dpdk_ring_list);
179
180 struct dpdk_ring {
181 /* For the client rings */
182 struct rte_ring *cring_tx;
183 struct rte_ring *cring_rx;
184 int user_port_id; /* User given port no, parsed from port name */
185 int eth_port_id; /* ethernet device port id */
186 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
187 };
188
189 struct netdev_dpdk {
190 struct netdev up;
191 int port_id;
192 int max_packet_len;
193 enum dpdk_dev_type type;
194
195 struct dpdk_tx_queue *tx_q;
196
197 struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
198
199 struct dpdk_mp *dpdk_mp;
200 int mtu;
201 int socket_id;
202 int buf_size;
203 struct netdev_stats stats;
204 /* Protects stats */
205 rte_spinlock_t stats_lock;
206
207 uint8_t hwaddr[ETH_ADDR_LEN];
208 enum netdev_flags flags;
209
210 struct rte_eth_link link;
211 int link_reset_cnt;
212
213 /* The user might request more txqs than the NIC has. We remap those
214 * ('up.n_txq') on these ('real_n_txq').
215 * If the numbers match, 'txq_needs_locking' is false, otherwise it is
216 * true and we will take a spinlock on transmission */
217 int real_n_txq;
218 bool txq_needs_locking;
219
220 /* Spinlock for vhost transmission. Other DPDK devices use spinlocks in
221 * dpdk_tx_queue */
222 rte_spinlock_t vhost_tx_lock;
223
224 /* virtio-net structure for vhost device */
225 OVSRCU_TYPE(struct virtio_net *) virtio_dev;
226
227 /* Identifier used to distinguish vhost devices from each other */
228 char vhost_id[PATH_MAX];
229
230 /* In dpdk_list. */
231 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
232 };
233
234 struct netdev_rxq_dpdk {
235 struct netdev_rxq up;
236 int port_id;
237 };
238
239 static bool thread_is_pmd(void);
240
241 static int netdev_dpdk_construct(struct netdev *);
242
243 struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
244
245 static bool
246 is_dpdk_class(const struct netdev_class *class)
247 {
248 return class->construct == netdev_dpdk_construct;
249 }
250
251 /* XXX: use dpdk malloc for entire OVS. in fact huge page should be used
252 * for all other segments data, bss and text. */
253
254 static void *
255 dpdk_rte_mzalloc(size_t sz)
256 {
257 void *ptr;
258
259 ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
260 if (ptr == NULL) {
261 out_of_memory();
262 }
263 return ptr;
264 }
265
266 /* XXX this function should be called only by pmd threads (or by non pmd
267 * threads holding the nonpmd_mempool_mutex) */
268 void
269 free_dpdk_buf(struct dp_packet *p)
270 {
271 struct rte_mbuf *pkt = (struct rte_mbuf *) p;
272
273 rte_pktmbuf_free_seg(pkt);
274 }
275
276 static void
277 __rte_pktmbuf_init(struct rte_mempool *mp,
278 void *opaque_arg OVS_UNUSED,
279 void *_m,
280 unsigned i OVS_UNUSED)
281 {
282 struct rte_mbuf *m = _m;
283 uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);
284
285 RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));
286
287 memset(m, 0, mp->elt_size);
288
289 /* start of buffer is just after mbuf structure */
290 m->buf_addr = (char *)m + sizeof(struct dp_packet);
291 m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
292 sizeof(struct dp_packet);
293 m->buf_len = (uint16_t)buf_len;
294
295 /* keep some headroom between start of buffer and data */
296 m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
297
298 /* init some constant fields */
299 m->pool = mp;
300 m->nb_segs = 1;
301 m->port = 0xff;
302 }
303
304 static void
305 ovs_rte_pktmbuf_init(struct rte_mempool *mp,
306 void *opaque_arg OVS_UNUSED,
307 void *_m,
308 unsigned i OVS_UNUSED)
309 {
310 struct rte_mbuf *m = _m;
311
312 __rte_pktmbuf_init(mp, opaque_arg, _m, i);
313
314 dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
315 }
316
317 static struct dpdk_mp *
318 dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
319 {
320 struct dpdk_mp *dmp = NULL;
321 char mp_name[RTE_MEMPOOL_NAMESIZE];
322 unsigned mp_size;
323
324 LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
325 if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
326 dmp->refcount++;
327 return dmp;
328 }
329 }
330
331 dmp = dpdk_rte_mzalloc(sizeof *dmp);
332 dmp->socket_id = socket_id;
333 dmp->mtu = mtu;
334 dmp->refcount = 1;
335
336 mp_size = MAX_NB_MBUF;
337 do {
338 if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
339 dmp->mtu, dmp->socket_id, mp_size) < 0) {
340 return NULL;
341 }
342
343 dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
344 MP_CACHE_SZ,
345 sizeof(struct rte_pktmbuf_pool_private),
346 rte_pktmbuf_pool_init, NULL,
347 ovs_rte_pktmbuf_init, NULL,
348 socket_id, 0);
349 } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
350
351 if (dmp->mp == NULL) {
352 return NULL;
353 } else {
354 VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size );
355 }
356
357 list_push_back(&dpdk_mp_list, &dmp->list_node);
358 return dmp;
359 }
360
361 static void
362 dpdk_mp_put(struct dpdk_mp *dmp)
363 {
364
365 if (!dmp) {
366 return;
367 }
368
369 dmp->refcount--;
370 ovs_assert(dmp->refcount >= 0);
371
372 #if 0
373 /* I could not find any API to destroy mp. */
374 if (dmp->refcount == 0) {
375 list_delete(dmp->list_node);
376 /* destroy mp-pool. */
377 }
378 #endif
379 }
380
381 static void
382 check_link_status(struct netdev_dpdk *dev)
383 {
384 struct rte_eth_link link;
385
386 rte_eth_link_get_nowait(dev->port_id, &link);
387
388 if (dev->link.link_status != link.link_status) {
389 netdev_change_seq_changed(&dev->up);
390
391 dev->link_reset_cnt++;
392 dev->link = link;
393 if (dev->link.link_status) {
394 VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
395 dev->port_id, (unsigned)dev->link.link_speed,
396 (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
397 ("full-duplex") : ("half-duplex"));
398 } else {
399 VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
400 }
401 }
402 }
403
404 static void *
405 dpdk_watchdog(void *dummy OVS_UNUSED)
406 {
407 struct netdev_dpdk *dev;
408
409 pthread_detach(pthread_self());
410
411 for (;;) {
412 ovs_mutex_lock(&dpdk_mutex);
413 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
414 ovs_mutex_lock(&dev->mutex);
415 check_link_status(dev);
416 ovs_mutex_unlock(&dev->mutex);
417 }
418 ovs_mutex_unlock(&dpdk_mutex);
419 xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
420 }
421
422 return NULL;
423 }
424
425 static int
426 dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
427 {
428 struct rte_pktmbuf_pool_private *mbp_priv;
429 struct rte_eth_dev_info info;
430 struct ether_addr eth_addr;
431 int diag;
432 int i;
433
434 if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
435 return ENODEV;
436 }
437
438 rte_eth_dev_info_get(dev->port_id, &info);
439 dev->up.n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
440 dev->real_n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
441
442 diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->real_n_txq,
443 &port_conf);
444 if (diag) {
445 VLOG_ERR("eth dev config error %d. rxq:%d txq:%d", diag, dev->up.n_rxq,
446 dev->real_n_txq);
447 return -diag;
448 }
449
450 for (i = 0; i < dev->real_n_txq; i++) {
451 diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
452 dev->socket_id, NULL);
453 if (diag) {
454 VLOG_ERR("eth dev tx queue setup error %d",diag);
455 return -diag;
456 }
457 }
458
459 for (i = 0; i < dev->up.n_rxq; i++) {
460 diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
461 dev->socket_id,
462 NULL, dev->dpdk_mp->mp);
463 if (diag) {
464 VLOG_ERR("eth dev rx queue setup error %d",diag);
465 return -diag;
466 }
467 }
468
469 diag = rte_eth_dev_start(dev->port_id);
470 if (diag) {
471 VLOG_ERR("eth dev start error %d",diag);
472 return -diag;
473 }
474
475 rte_eth_promiscuous_enable(dev->port_id);
476 rte_eth_allmulticast_enable(dev->port_id);
477
478 memset(&eth_addr, 0x0, sizeof(eth_addr));
479 rte_eth_macaddr_get(dev->port_id, &eth_addr);
480 VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
481 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));
482
483 memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
484 rte_eth_link_get_nowait(dev->port_id, &dev->link);
485
486 mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
487 dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
488
489 dev->flags = NETDEV_UP | NETDEV_PROMISC;
490 return 0;
491 }
492
493 static struct netdev_dpdk *
494 netdev_dpdk_cast(const struct netdev *netdev)
495 {
496 return CONTAINER_OF(netdev, struct netdev_dpdk, up);
497 }
498
499 static struct netdev *
500 netdev_dpdk_alloc(void)
501 {
502 struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
503 return &netdev->up;
504 }
505
506 static void
507 netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
508 {
509 unsigned i;
510
511 netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
512 for (i = 0; i < n_txqs; i++) {
513 int numa_id = ovs_numa_get_numa_id(i);
514
515 if (!netdev->txq_needs_locking) {
516 /* Each index is considered as a cpu core id, since there should
517 * be one tx queue for each cpu core. If the corresponding core
518 * is not on the same numa node as 'netdev', flags the
519 * 'flush_tx'. */
520 netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
521 } else {
522 /* Queues are shared among CPUs. Always flush */
523 netdev->tx_q[i].flush_tx = true;
524 }
525 rte_spinlock_init(&netdev->tx_q[i].tx_lock);
526 }
527 }
528
529 static int
530 netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
531 enum dpdk_dev_type type)
532 OVS_REQUIRES(dpdk_mutex)
533 {
534 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
535 int sid;
536 int err = 0;
537
538 ovs_mutex_init(&netdev->mutex);
539 ovs_mutex_lock(&netdev->mutex);
540
541 rte_spinlock_init(&netdev->stats_lock);
542
543 /* If the 'sid' is negative, it means that the kernel fails
544 * to obtain the pci numa info. In that situation, always
545 * use 'SOCKET0'. */
546 if (type == DPDK_DEV_ETH) {
547 sid = rte_eth_dev_socket_id(port_no);
548 } else {
549 sid = rte_lcore_to_socket_id(rte_get_master_lcore());
550 }
551
552 netdev->socket_id = sid < 0 ? SOCKET0 : sid;
553 netdev->port_id = port_no;
554 netdev->type = type;
555 netdev->flags = 0;
556 netdev->mtu = ETHER_MTU;
557 netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
558
559 netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
560 if (!netdev->dpdk_mp) {
561 err = ENOMEM;
562 goto unlock;
563 }
564
565 netdev_->n_txq = NR_QUEUE;
566 netdev_->n_rxq = NR_QUEUE;
567 netdev->real_n_txq = NR_QUEUE;
568
569 if (type == DPDK_DEV_ETH) {
570 netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
571 err = dpdk_eth_dev_init(netdev);
572 if (err) {
573 goto unlock;
574 }
575 }
576
577 list_push_back(&dpdk_list, &netdev->list_node);
578
579 unlock:
580 if (err) {
581 rte_free(netdev->tx_q);
582 }
583 ovs_mutex_unlock(&netdev->mutex);
584 return err;
585 }
586
587 static int
588 dpdk_dev_parse_name(const char dev_name[], const char prefix[],
589 unsigned int *port_no)
590 {
591 const char *cport;
592
593 if (strncmp(dev_name, prefix, strlen(prefix))) {
594 return ENODEV;
595 }
596
597 cport = dev_name + strlen(prefix);
598 *port_no = strtol(cport, NULL, 0); /* string must be null terminated */
599 return 0;
600 }
601
602 static int
603 vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
604 {
605 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
606
607 if (rte_eal_init_ret) {
608 return rte_eal_init_ret;
609 }
610
611 rte_spinlock_init(&netdev->vhost_tx_lock);
612 return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
613 }
614
615 static int
616 netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
617 {
618 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
619 int err;
620
621 ovs_mutex_lock(&dpdk_mutex);
622 strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
623 err = vhost_construct_helper(netdev_);
624 ovs_mutex_unlock(&dpdk_mutex);
625 return err;
626 }
627
628 static int
629 netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
630 {
631 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
632 int err;
633
634 ovs_mutex_lock(&dpdk_mutex);
635 /* Take the name of the vhost-user port and append it to the location where
636 * the socket is to be created, then register the socket.
637 */
638 snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
639 vhost_sock_dir, netdev_->name);
640 err = rte_vhost_driver_register(netdev->vhost_id);
641 if (err) {
642 VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
643 netdev->vhost_id);
644 }
645 VLOG_INFO("Socket %s created for vhost-user port %s\n", netdev->vhost_id, netdev_->name);
646 err = vhost_construct_helper(netdev_);
647 ovs_mutex_unlock(&dpdk_mutex);
648 return err;
649 }
650
651 static int
652 netdev_dpdk_construct(struct netdev *netdev)
653 {
654 unsigned int port_no;
655 int err;
656
657 if (rte_eal_init_ret) {
658 return rte_eal_init_ret;
659 }
660
661 /* Names always start with "dpdk" */
662 err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
663 if (err) {
664 return err;
665 }
666
667 ovs_mutex_lock(&dpdk_mutex);
668 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
669 ovs_mutex_unlock(&dpdk_mutex);
670 return err;
671 }
672
673 static void
674 netdev_dpdk_destruct(struct netdev *netdev_)
675 {
676 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
677
678 ovs_mutex_lock(&dev->mutex);
679 rte_eth_dev_stop(dev->port_id);
680 ovs_mutex_unlock(&dev->mutex);
681
682 ovs_mutex_lock(&dpdk_mutex);
683 rte_free(dev->tx_q);
684 list_remove(&dev->list_node);
685 dpdk_mp_put(dev->dpdk_mp);
686 ovs_mutex_unlock(&dpdk_mutex);
687 }
688
689 static void
690 netdev_dpdk_vhost_destruct(struct netdev *netdev_)
691 {
692 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
693
694 /* Can't remove a port while a guest is attached to it. */
695 if (netdev_dpdk_get_virtio(dev) != NULL) {
696 VLOG_ERR("Can not remove port, vhost device still attached");
697 return;
698 }
699
700 ovs_mutex_lock(&dpdk_mutex);
701 list_remove(&dev->list_node);
702 dpdk_mp_put(dev->dpdk_mp);
703 ovs_mutex_unlock(&dpdk_mutex);
704 }
705
706 static void
707 netdev_dpdk_dealloc(struct netdev *netdev_)
708 {
709 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
710
711 rte_free(netdev);
712 }
713
714 static int
715 netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
716 {
717 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
718
719 ovs_mutex_lock(&dev->mutex);
720
721 smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
722 smap_add_format(args, "requested_tx_queues", "%d", netdev_->n_txq);
723 smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
724 ovs_mutex_unlock(&dev->mutex);
725
726 return 0;
727 }
728
729 static int
730 netdev_dpdk_get_numa_id(const struct netdev *netdev_)
731 {
732 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
733
734 return netdev->socket_id;
735 }
736
737 /* Sets the number of tx queues and rx queues for the dpdk interface.
738 * If the configuration fails, do not try restoring its old configuration
739 * and just returns the error. */
740 static int
741 netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
742 unsigned int n_rxq)
743 {
744 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
745 int err = 0;
746 int old_rxq, old_txq;
747
748 if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
749 return err;
750 }
751
752 ovs_mutex_lock(&dpdk_mutex);
753 ovs_mutex_lock(&netdev->mutex);
754
755 rte_eth_dev_stop(netdev->port_id);
756
757 old_txq = netdev->up.n_txq;
758 old_rxq = netdev->up.n_rxq;
759 netdev->up.n_txq = n_txq;
760 netdev->up.n_rxq = n_rxq;
761
762 rte_free(netdev->tx_q);
763 err = dpdk_eth_dev_init(netdev);
764 netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
765 if (err) {
766 /* If there has been an error, it means that the requested queues
767 * have not been created. Restore the old numbers. */
768 netdev->up.n_txq = old_txq;
769 netdev->up.n_rxq = old_rxq;
770 }
771
772 netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;
773
774 ovs_mutex_unlock(&netdev->mutex);
775 ovs_mutex_unlock(&dpdk_mutex);
776
777 return err;
778 }
779
780 static int
781 netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
782 unsigned int n_rxq)
783 {
784 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
785 int err = 0;
786
787 if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
788 return err;
789 }
790
791 ovs_mutex_lock(&dpdk_mutex);
792 ovs_mutex_lock(&netdev->mutex);
793
794 netdev->up.n_txq = n_txq;
795 netdev->real_n_txq = 1;
796 netdev->up.n_rxq = 1;
797
798 ovs_mutex_unlock(&netdev->mutex);
799 ovs_mutex_unlock(&dpdk_mutex);
800
801 return err;
802 }
803
804 static struct netdev_rxq *
805 netdev_dpdk_rxq_alloc(void)
806 {
807 struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);
808
809 return &rx->up;
810 }
811
812 static struct netdev_rxq_dpdk *
813 netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
814 {
815 return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
816 }
817
818 static int
819 netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
820 {
821 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
822 struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);
823
824 ovs_mutex_lock(&netdev->mutex);
825 rx->port_id = netdev->port_id;
826 ovs_mutex_unlock(&netdev->mutex);
827
828 return 0;
829 }
830
831 static void
832 netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
833 {
834 }
835
836 static void
837 netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
838 {
839 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
840
841 rte_free(rx);
842 }
843
844 static inline void
845 dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
846 {
847 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
848 uint32_t nb_tx = 0;
849
850 while (nb_tx != txq->count) {
851 uint32_t ret;
852
853 ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
854 txq->count - nb_tx);
855 if (!ret) {
856 break;
857 }
858
859 nb_tx += ret;
860 }
861
862 if (OVS_UNLIKELY(nb_tx != txq->count)) {
863 /* free buffers, which we couldn't transmit, one at a time (each
864 * packet could come from a different mempool) */
865 int i;
866
867 for (i = nb_tx; i < txq->count; i++) {
868 rte_pktmbuf_free_seg(txq->burst_pkts[i]);
869 }
870 rte_spinlock_lock(&dev->stats_lock);
871 dev->stats.tx_dropped += txq->count-nb_tx;
872 rte_spinlock_unlock(&dev->stats_lock);
873 }
874
875 txq->count = 0;
876 txq->tsc = rte_get_timer_cycles();
877 }
878
879 static inline void
880 dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
881 {
882 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
883
884 if (txq->count == 0) {
885 return;
886 }
887 dpdk_queue_flush__(dev, qid);
888 }
889
890 static bool
891 is_vhost_running(struct virtio_net *dev)
892 {
893 return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
894 }
895
896 /*
897 * The receive path for the vhost port is the TX path out from guest.
898 */
899 static int
900 netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
901 struct dp_packet **packets, int *c)
902 {
903 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
904 struct netdev *netdev = rx->up.netdev;
905 struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
906 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
907 int qid = 1;
908 uint16_t nb_rx = 0;
909
910 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
911 return EAGAIN;
912 }
913
914 nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
915 vhost_dev->dpdk_mp->mp,
916 (struct rte_mbuf **)packets,
917 NETDEV_MAX_BURST);
918 if (!nb_rx) {
919 return EAGAIN;
920 }
921
922 rte_spinlock_lock(&vhost_dev->stats_lock);
923 vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
924 rte_spinlock_unlock(&vhost_dev->stats_lock);
925
926 *c = (int) nb_rx;
927 return 0;
928 }
929
930 static int
931 netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
932 int *c)
933 {
934 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
935 struct netdev *netdev = rx->up.netdev;
936 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
937 int nb_rx;
938
939 /* There is only one tx queue for this core. Do not flush other
940 * queues.
941 * Do not flush tx queue which is shared among CPUs
942 * since it is always flushed */
943 if (rxq_->queue_id == rte_lcore_id() &&
944 OVS_LIKELY(!dev->txq_needs_locking)) {
945 dpdk_queue_flush(dev, rxq_->queue_id);
946 }
947
948 nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
949 (struct rte_mbuf **) packets,
950 NETDEV_MAX_BURST);
951 if (!nb_rx) {
952 return EAGAIN;
953 }
954
955 *c = nb_rx;
956
957 return 0;
958 }
959
960 static void
961 __netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
962 int cnt, bool may_steal)
963 {
964 struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
965 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
966 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
967 unsigned int total_pkts = cnt;
968 uint64_t start = 0;
969
970 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
971 rte_spinlock_lock(&vhost_dev->stats_lock);
972 vhost_dev->stats.tx_dropped+= cnt;
973 rte_spinlock_unlock(&vhost_dev->stats_lock);
974 goto out;
975 }
976
977 /* There is vHost TX single queue, So we need to lock it for TX. */
978 rte_spinlock_lock(&vhost_dev->vhost_tx_lock);
979
980 do {
981 unsigned int tx_pkts;
982
983 tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
984 cur_pkts, cnt);
985 if (OVS_LIKELY(tx_pkts)) {
986 /* Packets have been sent.*/
987 cnt -= tx_pkts;
988 /* Prepare for possible next iteration.*/
989 cur_pkts = &cur_pkts[tx_pkts];
990 } else {
991 uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
992 unsigned int expired = 0;
993
994 if (!start) {
995 start = rte_get_timer_cycles();
996 }
997
998 /*
999 * Unable to enqueue packets to vhost interface.
1000 * Check available entries before retrying.
1001 */
1002 while (!rte_vring_available_entries(virtio_dev, VIRTIO_RXQ)) {
1003 if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
1004 expired = 1;
1005 break;
1006 }
1007 }
1008 if (expired) {
1009 /* break out of main loop. */
1010 break;
1011 }
1012 }
1013 } while (cnt);
1014 rte_spinlock_unlock(&vhost_dev->vhost_tx_lock);
1015
1016 rte_spinlock_lock(&vhost_dev->stats_lock);
1017 vhost_dev->stats.tx_packets += (total_pkts - cnt);
1018 vhost_dev->stats.tx_dropped += cnt;
1019 rte_spinlock_unlock(&vhost_dev->stats_lock);
1020
1021 out:
1022 if (may_steal) {
1023 int i;
1024
1025 for (i = 0; i < total_pkts; i++) {
1026 dp_packet_delete(pkts[i]);
1027 }
1028 }
1029 }
1030
1031 inline static void
1032 dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
1033 struct rte_mbuf **pkts, int cnt)
1034 {
1035 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
1036 uint64_t diff_tsc;
1037
1038 int i = 0;
1039
1040 while (i < cnt) {
1041 int freeslots = MAX_TX_QUEUE_LEN - txq->count;
1042 int tocopy = MIN(freeslots, cnt-i);
1043
1044 memcpy(&txq->burst_pkts[txq->count], &pkts[i],
1045 tocopy * sizeof (struct rte_mbuf *));
1046
1047 txq->count += tocopy;
1048 i += tocopy;
1049
1050 if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
1051 dpdk_queue_flush__(dev, qid);
1052 }
1053 diff_tsc = rte_get_timer_cycles() - txq->tsc;
1054 if (diff_tsc >= DRAIN_TSC) {
1055 dpdk_queue_flush__(dev, qid);
1056 }
1057 }
1058 }
1059
1060 /* Tx function. Transmit packets indefinitely */
1061 static void
1062 dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
1063 int cnt)
1064 OVS_NO_THREAD_SAFETY_ANALYSIS
1065 {
1066 #if !defined(__CHECKER__) && !defined(_WIN32)
1067 const size_t PKT_ARRAY_SIZE = cnt;
1068 #else
1069 /* Sparse or MSVC doesn't like variable length array. */
1070 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
1071 #endif
1072 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1073 struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
1074 int dropped = 0;
1075 int newcnt = 0;
1076 int i;
1077
1078 /* If we are on a non pmd thread we have to use the mempool mutex, because
1079 * every non pmd thread shares the same mempool cache */
1080
1081 if (!thread_is_pmd()) {
1082 ovs_mutex_lock(&nonpmd_mempool_mutex);
1083 }
1084
1085 for (i = 0; i < cnt; i++) {
1086 int size = dp_packet_size(pkts[i]);
1087
1088 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
1089 VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
1090 (int)size , dev->max_packet_len);
1091
1092 dropped++;
1093 continue;
1094 }
1095
1096 mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
1097
1098 if (!mbufs[newcnt]) {
1099 dropped += cnt - i;
1100 break;
1101 }
1102
1103 /* We have to do a copy for now */
1104 memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *), dp_packet_data(pkts[i]), size);
1105
1106 rte_pktmbuf_data_len(mbufs[newcnt]) = size;
1107 rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;
1108
1109 newcnt++;
1110 }
1111
1112 if (OVS_UNLIKELY(dropped)) {
1113 rte_spinlock_lock(&dev->stats_lock);
1114 dev->stats.tx_dropped += dropped;
1115 rte_spinlock_unlock(&dev->stats_lock);
1116 }
1117
1118 if (dev->type == DPDK_DEV_VHOST) {
1119 __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs, newcnt, true);
1120 } else {
1121 dpdk_queue_pkts(dev, qid, mbufs, newcnt);
1122 dpdk_queue_flush(dev, qid);
1123 }
1124
1125 if (!thread_is_pmd()) {
1126 ovs_mutex_unlock(&nonpmd_mempool_mutex);
1127 }
1128 }
1129
1130 static int
1131 netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED, struct dp_packet **pkts,
1132 int cnt, bool may_steal)
1133 {
1134 if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
1135 int i;
1136
1137 dpdk_do_tx_copy(netdev, qid, pkts, cnt);
1138 if (may_steal) {
1139 for (i = 0; i < cnt; i++) {
1140 dp_packet_delete(pkts[i]);
1141 }
1142 }
1143 } else {
1144 __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
1145 }
1146 return 0;
1147 }
1148
1149 static inline void
1150 netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
1151 struct dp_packet **pkts, int cnt, bool may_steal)
1152 {
1153 int i;
1154
1155 if (OVS_UNLIKELY(dev->txq_needs_locking)) {
1156 qid = qid % dev->real_n_txq;
1157 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
1158 }
1159
1160 if (OVS_UNLIKELY(!may_steal ||
1161 pkts[0]->source != DPBUF_DPDK)) {
1162 struct netdev *netdev = &dev->up;
1163
1164 dpdk_do_tx_copy(netdev, qid, pkts, cnt);
1165
1166 if (may_steal) {
1167 for (i = 0; i < cnt; i++) {
1168 dp_packet_delete(pkts[i]);
1169 }
1170 }
1171 } else {
1172 int next_tx_idx = 0;
1173 int dropped = 0;
1174
1175 for (i = 0; i < cnt; i++) {
1176 int size = dp_packet_size(pkts[i]);
1177
1178 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
1179 if (next_tx_idx != i) {
1180 dpdk_queue_pkts(dev, qid,
1181 (struct rte_mbuf **)&pkts[next_tx_idx],
1182 i-next_tx_idx);
1183 }
1184
1185 VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
1186 (int)size , dev->max_packet_len);
1187
1188 dp_packet_delete(pkts[i]);
1189 dropped++;
1190 next_tx_idx = i + 1;
1191 }
1192 }
1193 if (next_tx_idx != cnt) {
1194 dpdk_queue_pkts(dev, qid,
1195 (struct rte_mbuf **)&pkts[next_tx_idx],
1196 cnt-next_tx_idx);
1197 }
1198
1199 if (OVS_UNLIKELY(dropped)) {
1200 rte_spinlock_lock(&dev->stats_lock);
1201 dev->stats.tx_dropped += dropped;
1202 rte_spinlock_unlock(&dev->stats_lock);
1203 }
1204 }
1205
1206 if (OVS_UNLIKELY(dev->txq_needs_locking)) {
1207 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
1208 }
1209 }
1210
1211 static int
1212 netdev_dpdk_eth_send(struct netdev *netdev, int qid,
1213 struct dp_packet **pkts, int cnt, bool may_steal)
1214 {
1215 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1216
1217 netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
1218 return 0;
1219 }
1220
1221 static int
1222 netdev_dpdk_set_etheraddr(struct netdev *netdev,
1223 const uint8_t mac[ETH_ADDR_LEN])
1224 {
1225 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1226
1227 ovs_mutex_lock(&dev->mutex);
1228 if (!eth_addr_equals(dev->hwaddr, mac)) {
1229 memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
1230 netdev_change_seq_changed(netdev);
1231 }
1232 ovs_mutex_unlock(&dev->mutex);
1233
1234 return 0;
1235 }
1236
1237 static int
1238 netdev_dpdk_get_etheraddr(const struct netdev *netdev,
1239 uint8_t mac[ETH_ADDR_LEN])
1240 {
1241 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1242
1243 ovs_mutex_lock(&dev->mutex);
1244 memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
1245 ovs_mutex_unlock(&dev->mutex);
1246
1247 return 0;
1248 }
1249
1250 static int
1251 netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
1252 {
1253 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1254
1255 ovs_mutex_lock(&dev->mutex);
1256 *mtup = dev->mtu;
1257 ovs_mutex_unlock(&dev->mutex);
1258
1259 return 0;
1260 }
1261
1262 static int
1263 netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
1264 {
1265 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1266 int old_mtu, err;
1267 struct dpdk_mp *old_mp;
1268 struct dpdk_mp *mp;
1269
1270 ovs_mutex_lock(&dpdk_mutex);
1271 ovs_mutex_lock(&dev->mutex);
1272 if (dev->mtu == mtu) {
1273 err = 0;
1274 goto out;
1275 }
1276
1277 mp = dpdk_mp_get(dev->socket_id, dev->mtu);
1278 if (!mp) {
1279 err = ENOMEM;
1280 goto out;
1281 }
1282
1283 rte_eth_dev_stop(dev->port_id);
1284
1285 old_mtu = dev->mtu;
1286 old_mp = dev->dpdk_mp;
1287 dev->dpdk_mp = mp;
1288 dev->mtu = mtu;
1289 dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
1290
1291 err = dpdk_eth_dev_init(dev);
1292 if (err) {
1293 dpdk_mp_put(mp);
1294 dev->mtu = old_mtu;
1295 dev->dpdk_mp = old_mp;
1296 dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
1297 dpdk_eth_dev_init(dev);
1298 goto out;
1299 }
1300
1301 dpdk_mp_put(old_mp);
1302 netdev_change_seq_changed(netdev);
1303 out:
1304 ovs_mutex_unlock(&dev->mutex);
1305 ovs_mutex_unlock(&dpdk_mutex);
1306 return err;
1307 }
1308
1309 static int
1310 netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
1311
1312 static int
1313 netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
1314 struct netdev_stats *stats)
1315 {
1316 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1317
1318 ovs_mutex_lock(&dev->mutex);
1319 memset(stats, 0, sizeof(*stats));
1320 /* Unsupported Stats */
1321 stats->rx_errors = UINT64_MAX;
1322 stats->tx_errors = UINT64_MAX;
1323 stats->multicast = UINT64_MAX;
1324 stats->collisions = UINT64_MAX;
1325 stats->rx_crc_errors = UINT64_MAX;
1326 stats->rx_fifo_errors = UINT64_MAX;
1327 stats->rx_frame_errors = UINT64_MAX;
1328 stats->rx_length_errors = UINT64_MAX;
1329 stats->rx_missed_errors = UINT64_MAX;
1330 stats->rx_over_errors = UINT64_MAX;
1331 stats->tx_aborted_errors = UINT64_MAX;
1332 stats->tx_carrier_errors = UINT64_MAX;
1333 stats->tx_errors = UINT64_MAX;
1334 stats->tx_fifo_errors = UINT64_MAX;
1335 stats->tx_heartbeat_errors = UINT64_MAX;
1336 stats->tx_window_errors = UINT64_MAX;
1337 stats->rx_bytes += UINT64_MAX;
1338 stats->rx_dropped += UINT64_MAX;
1339 stats->tx_bytes += UINT64_MAX;
1340
1341 rte_spinlock_lock(&dev->stats_lock);
1342 /* Supported Stats */
1343 stats->rx_packets += dev->stats.rx_packets;
1344 stats->tx_packets += dev->stats.tx_packets;
1345 stats->tx_dropped += dev->stats.tx_dropped;
1346 rte_spinlock_unlock(&dev->stats_lock);
1347 ovs_mutex_unlock(&dev->mutex);
1348
1349 return 0;
1350 }
1351
1352 static int
1353 netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1354 {
1355 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1356 struct rte_eth_stats rte_stats;
1357 bool gg;
1358
1359 netdev_dpdk_get_carrier(netdev, &gg);
1360 ovs_mutex_lock(&dev->mutex);
1361 rte_eth_stats_get(dev->port_id, &rte_stats);
1362
1363 memset(stats, 0, sizeof(*stats));
1364
1365 stats->rx_packets = rte_stats.ipackets;
1366 stats->tx_packets = rte_stats.opackets;
1367 stats->rx_bytes = rte_stats.ibytes;
1368 stats->tx_bytes = rte_stats.obytes;
1369 stats->rx_errors = rte_stats.ierrors;
1370 stats->tx_errors = rte_stats.oerrors;
1371 stats->multicast = rte_stats.imcasts;
1372
1373 rte_spinlock_lock(&dev->stats_lock);
1374 stats->tx_dropped = dev->stats.tx_dropped;
1375 rte_spinlock_unlock(&dev->stats_lock);
1376 ovs_mutex_unlock(&dev->mutex);
1377
1378 return 0;
1379 }
1380
1381 static int
1382 netdev_dpdk_get_features(const struct netdev *netdev_,
1383 enum netdev_features *current,
1384 enum netdev_features *advertised OVS_UNUSED,
1385 enum netdev_features *supported OVS_UNUSED,
1386 enum netdev_features *peer OVS_UNUSED)
1387 {
1388 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1389 struct rte_eth_link link;
1390
1391 ovs_mutex_lock(&dev->mutex);
1392 link = dev->link;
1393 ovs_mutex_unlock(&dev->mutex);
1394
1395 if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
1396 if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
1397 *current = NETDEV_F_AUTONEG;
1398 }
1399 } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
1400 if (link.link_speed == ETH_LINK_SPEED_10) {
1401 *current = NETDEV_F_10MB_HD;
1402 }
1403 if (link.link_speed == ETH_LINK_SPEED_100) {
1404 *current = NETDEV_F_100MB_HD;
1405 }
1406 if (link.link_speed == ETH_LINK_SPEED_1000) {
1407 *current = NETDEV_F_1GB_HD;
1408 }
1409 } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
1410 if (link.link_speed == ETH_LINK_SPEED_10) {
1411 *current = NETDEV_F_10MB_FD;
1412 }
1413 if (link.link_speed == ETH_LINK_SPEED_100) {
1414 *current = NETDEV_F_100MB_FD;
1415 }
1416 if (link.link_speed == ETH_LINK_SPEED_1000) {
1417 *current = NETDEV_F_1GB_FD;
1418 }
1419 if (link.link_speed == ETH_LINK_SPEED_10000) {
1420 *current = NETDEV_F_10GB_FD;
1421 }
1422 }
1423
1424 return 0;
1425 }
1426
1427 static int
1428 netdev_dpdk_get_ifindex(const struct netdev *netdev)
1429 {
1430 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1431 int ifindex;
1432
1433 ovs_mutex_lock(&dev->mutex);
1434 ifindex = dev->port_id;
1435 ovs_mutex_unlock(&dev->mutex);
1436
1437 return ifindex;
1438 }
1439
1440 static int
1441 netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
1442 {
1443 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1444
1445 ovs_mutex_lock(&dev->mutex);
1446 check_link_status(dev);
1447 *carrier = dev->link.link_status;
1448
1449 ovs_mutex_unlock(&dev->mutex);
1450
1451 return 0;
1452 }
1453
1454 static int
1455 netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
1456 {
1457 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1458 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
1459
1460 ovs_mutex_lock(&dev->mutex);
1461
1462 if (is_vhost_running(virtio_dev)) {
1463 *carrier = 1;
1464 } else {
1465 *carrier = 0;
1466 }
1467
1468 ovs_mutex_unlock(&dev->mutex);
1469
1470 return 0;
1471 }
1472
1473 static long long int
1474 netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
1475 {
1476 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1477 long long int carrier_resets;
1478
1479 ovs_mutex_lock(&dev->mutex);
1480 carrier_resets = dev->link_reset_cnt;
1481 ovs_mutex_unlock(&dev->mutex);
1482
1483 return carrier_resets;
1484 }
1485
1486 static int
1487 netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
1488 long long int interval OVS_UNUSED)
1489 {
1490 return EOPNOTSUPP;
1491 }
1492
1493 static int
1494 netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
1495 enum netdev_flags off, enum netdev_flags on,
1496 enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
1497 {
1498 int err;
1499
1500 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1501 return EINVAL;
1502 }
1503
1504 *old_flagsp = dev->flags;
1505 dev->flags |= on;
1506 dev->flags &= ~off;
1507
1508 if (dev->flags == *old_flagsp) {
1509 return 0;
1510 }
1511
1512 if (dev->type == DPDK_DEV_ETH) {
1513 if (dev->flags & NETDEV_UP) {
1514 err = rte_eth_dev_start(dev->port_id);
1515 if (err)
1516 return -err;
1517 }
1518
1519 if (dev->flags & NETDEV_PROMISC) {
1520 rte_eth_promiscuous_enable(dev->port_id);
1521 }
1522
1523 if (!(dev->flags & NETDEV_UP)) {
1524 rte_eth_dev_stop(dev->port_id);
1525 }
1526 }
1527
1528 return 0;
1529 }
1530
1531 static int
1532 netdev_dpdk_update_flags(struct netdev *netdev_,
1533 enum netdev_flags off, enum netdev_flags on,
1534 enum netdev_flags *old_flagsp)
1535 {
1536 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
1537 int error;
1538
1539 ovs_mutex_lock(&netdev->mutex);
1540 error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
1541 ovs_mutex_unlock(&netdev->mutex);
1542
1543 return error;
1544 }
1545
1546 static int
1547 netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
1548 {
1549 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1550 struct rte_eth_dev_info dev_info;
1551
1552 if (dev->port_id < 0)
1553 return ENODEV;
1554
1555 ovs_mutex_lock(&dev->mutex);
1556 rte_eth_dev_info_get(dev->port_id, &dev_info);
1557 ovs_mutex_unlock(&dev->mutex);
1558
1559 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
1560
1561 smap_add_format(args, "port_no", "%d", dev->port_id);
1562 smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
1563 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
1564 smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
1565 smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
1566 smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
1567 smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
1568 smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
1569 smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
1570 smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
1571 smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);
1572
1573 smap_add_format(args, "pci-vendor_id", "0x%u", dev_info.pci_dev->id.vendor_id);
1574 smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);
1575
1576 return 0;
1577 }
1578
1579 static void
1580 netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
1581 OVS_REQUIRES(dev->mutex)
1582 {
1583 enum netdev_flags old_flags;
1584
1585 if (admin_state) {
1586 netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1587 } else {
1588 netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1589 }
1590 }
1591
1592 static void
1593 netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
1594 const char *argv[], void *aux OVS_UNUSED)
1595 {
1596 bool up;
1597
1598 if (!strcasecmp(argv[argc - 1], "up")) {
1599 up = true;
1600 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1601 up = false;
1602 } else {
1603 unixctl_command_reply_error(conn, "Invalid Admin State");
1604 return;
1605 }
1606
1607 if (argc > 2) {
1608 struct netdev *netdev = netdev_from_name(argv[1]);
1609 if (netdev && is_dpdk_class(netdev->netdev_class)) {
1610 struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);
1611
1612 ovs_mutex_lock(&dpdk_dev->mutex);
1613 netdev_dpdk_set_admin_state__(dpdk_dev, up);
1614 ovs_mutex_unlock(&dpdk_dev->mutex);
1615
1616 netdev_close(netdev);
1617 } else {
1618 unixctl_command_reply_error(conn, "Not a DPDK Interface");
1619 netdev_close(netdev);
1620 return;
1621 }
1622 } else {
1623 struct netdev_dpdk *netdev;
1624
1625 ovs_mutex_lock(&dpdk_mutex);
1626 LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
1627 ovs_mutex_lock(&netdev->mutex);
1628 netdev_dpdk_set_admin_state__(netdev, up);
1629 ovs_mutex_unlock(&netdev->mutex);
1630 }
1631 ovs_mutex_unlock(&dpdk_mutex);
1632 }
1633 unixctl_command_reply(conn, "OK");
1634 }
1635
1636 /*
1637 * Set virtqueue flags so that we do not receive interrupts.
1638 */
1639 static void
1640 set_irq_status(struct virtio_net *dev)
1641 {
1642 dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
1643 dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
1644 }
1645
1646 /*
1647 * A new virtio-net device is added to a vhost port.
1648 */
1649 static int
1650 new_device(struct virtio_net *dev)
1651 {
1652 struct netdev_dpdk *netdev;
1653 bool exists = false;
1654
1655 ovs_mutex_lock(&dpdk_mutex);
1656 /* Add device to the vhost port with the same name as that passed down. */
1657 LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
1658 if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
1659 ovs_mutex_lock(&netdev->mutex);
1660 ovsrcu_set(&netdev->virtio_dev, dev);
1661 ovs_mutex_unlock(&netdev->mutex);
1662 exists = true;
1663 dev->flags |= VIRTIO_DEV_RUNNING;
1664 /* Disable notifications. */
1665 set_irq_status(dev);
1666 break;
1667 }
1668 }
1669 ovs_mutex_unlock(&dpdk_mutex);
1670
1671 if (!exists) {
1672 VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
1673 dev->ifname, dev->device_fh);
1674
1675 return -1;
1676 }
1677
1678 VLOG_INFO("vHost Device '%s' (%ld) has been added",
1679 dev->ifname, dev->device_fh);
1680 return 0;
1681 }
1682
1683 /*
1684 * Remove a virtio-net device from the specific vhost port. Use dev->remove
1685 * flag to stop any more packets from being sent or received to/from a VM and
1686 * ensure all currently queued packets have been sent/received before removing
1687 * the device.
1688 */
1689 static void
1690 destroy_device(volatile struct virtio_net *dev)
1691 {
1692 struct netdev_dpdk *vhost_dev;
1693
1694 ovs_mutex_lock(&dpdk_mutex);
1695 LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
1696 if (netdev_dpdk_get_virtio(vhost_dev) == dev) {
1697
1698 ovs_mutex_lock(&vhost_dev->mutex);
1699 dev->flags &= ~VIRTIO_DEV_RUNNING;
1700 ovsrcu_set(&vhost_dev->virtio_dev, NULL);
1701 ovs_mutex_unlock(&vhost_dev->mutex);
1702
1703 /*
1704 * Wait for other threads to quiesce before
1705 * setting the virtio_dev to NULL.
1706 */
1707 ovsrcu_synchronize();
1708 /*
1709 * As call to ovsrcu_synchronize() will end the quiescent state,
1710 * put thread back into quiescent state before returning.
1711 */
1712 ovsrcu_quiesce_start();
1713 }
1714 }
1715 ovs_mutex_unlock(&dpdk_mutex);
1716
1717 VLOG_INFO("vHost Device '%s' (%ld) has been removed",
1718 dev->ifname, dev->device_fh);
1719 }
1720
1721 struct virtio_net *
1722 netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
1723 {
1724 return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
1725 }
1726
1727 /*
1728 * These callbacks allow virtio-net devices to be added to vhost ports when
1729 * configuration has been fully complete.
1730 */
1731 static const struct virtio_net_device_ops virtio_net_device_ops =
1732 {
1733 .new_device = new_device,
1734 .destroy_device = destroy_device,
1735 };
1736
1737 static void *
1738 start_vhost_loop(void *dummy OVS_UNUSED)
1739 {
1740 pthread_detach(pthread_self());
1741 /* Put the cuse thread into quiescent state. */
1742 ovsrcu_quiesce_start();
1743 rte_vhost_driver_session_start();
1744 return NULL;
1745 }
1746
1747 static int
1748 dpdk_vhost_class_init(void)
1749 {
1750 rte_vhost_driver_callback_register(&virtio_net_device_ops);
1751 ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
1752 return 0;
1753 }
1754
1755 static int
1756 dpdk_vhost_cuse_class_init(void)
1757 {
1758 int err = -1;
1759
1760
1761 /* Register CUSE device to handle IOCTLs.
1762 * Unless otherwise specified on the vswitchd command line, cuse_dev_name
1763 * is set to vhost-net.
1764 */
1765 err = rte_vhost_driver_register(cuse_dev_name);
1766
1767 if (err != 0) {
1768 VLOG_ERR("CUSE device setup failure.");
1769 return -1;
1770 }
1771
1772 dpdk_vhost_class_init();
1773 return 0;
1774 }
1775
1776 static int
1777 dpdk_vhost_user_class_init(void)
1778 {
1779 dpdk_vhost_class_init();
1780 return 0;
1781 }
1782
1783 static void
1784 dpdk_common_init(void)
1785 {
1786 unixctl_command_register("netdev-dpdk/set-admin-state",
1787 "[netdev] up|down", 1, 2,
1788 netdev_dpdk_set_admin_state, NULL);
1789
1790 ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
1791 }
1792
1793 /* Client Rings */
1794
1795 static int
1796 dpdk_ring_create(const char dev_name[], unsigned int port_no,
1797 unsigned int *eth_port_id)
1798 {
1799 struct dpdk_ring *ivshmem;
1800 char ring_name[10];
1801 int err;
1802
1803 ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
1804 if (ivshmem == NULL) {
1805 return ENOMEM;
1806 }
1807
1808 /* XXX: Add support for multiquque ring. */
1809 err = snprintf(ring_name, 10, "%s_tx", dev_name);
1810 if (err < 0) {
1811 return -err;
1812 }
1813
1814 /* Create single consumer/producer rings, netdev does explicit locking. */
1815 ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
1816 RING_F_SP_ENQ | RING_F_SC_DEQ);
1817 if (ivshmem->cring_tx == NULL) {
1818 rte_free(ivshmem);
1819 return ENOMEM;
1820 }
1821
1822 err = snprintf(ring_name, 10, "%s_rx", dev_name);
1823 if (err < 0) {
1824 return -err;
1825 }
1826
1827 /* Create single consumer/producer rings, netdev does explicit locking. */
1828 ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
1829 RING_F_SP_ENQ | RING_F_SC_DEQ);
1830 if (ivshmem->cring_rx == NULL) {
1831 rte_free(ivshmem);
1832 return ENOMEM;
1833 }
1834
1835 err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
1836 &ivshmem->cring_tx, 1, SOCKET0);
1837
1838 if (err < 0) {
1839 rte_free(ivshmem);
1840 return ENODEV;
1841 }
1842
1843 ivshmem->user_port_id = port_no;
1844 ivshmem->eth_port_id = rte_eth_dev_count() - 1;
1845 list_push_back(&dpdk_ring_list, &ivshmem->list_node);
1846
1847 *eth_port_id = ivshmem->eth_port_id;
1848 return 0;
1849 }
1850
1851 static int
1852 dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
1853 {
1854 struct dpdk_ring *ivshmem;
1855 unsigned int port_no;
1856 int err = 0;
1857
1858 /* Names always start with "dpdkr" */
1859 err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
1860 if (err) {
1861 return err;
1862 }
1863
1864 /* look through our list to find the device */
1865 LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
1866 if (ivshmem->user_port_id == port_no) {
1867 VLOG_INFO("Found dpdk ring device %s:", dev_name);
1868 *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
1869 return 0;
1870 }
1871 }
1872 /* Need to create the device rings */
1873 return dpdk_ring_create(dev_name, port_no, eth_port_id);
1874 }
1875
1876 static int
1877 netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
1878 struct dp_packet **pkts, int cnt, bool may_steal)
1879 {
1880 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
1881 unsigned i;
1882
1883 /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that the
1884 * rss hash field is clear. This is because the same mbuf may be modified by
1885 * the consumer of the ring and return into the datapath without recalculating
1886 * the RSS hash. */
1887 for (i = 0; i < cnt; i++) {
1888 dp_packet_set_rss_hash(pkts[i], 0);
1889 }
1890
1891 netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
1892 return 0;
1893 }
1894
1895 static int
1896 netdev_dpdk_ring_construct(struct netdev *netdev)
1897 {
1898 unsigned int port_no = 0;
1899 int err = 0;
1900
1901 if (rte_eal_init_ret) {
1902 return rte_eal_init_ret;
1903 }
1904
1905 ovs_mutex_lock(&dpdk_mutex);
1906
1907 err = dpdk_ring_open(netdev->name, &port_no);
1908 if (err) {
1909 goto unlock_dpdk;
1910 }
1911
1912 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
1913
1914 unlock_dpdk:
1915 ovs_mutex_unlock(&dpdk_mutex);
1916 return err;
1917 }
1918
1919 #define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
1920 GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV) \
1921 { \
1922 NAME, \
1923 INIT, /* init */ \
1924 NULL, /* netdev_dpdk_run */ \
1925 NULL, /* netdev_dpdk_wait */ \
1926 \
1927 netdev_dpdk_alloc, \
1928 CONSTRUCT, \
1929 DESTRUCT, \
1930 netdev_dpdk_dealloc, \
1931 netdev_dpdk_get_config, \
1932 NULL, /* netdev_dpdk_set_config */ \
1933 NULL, /* get_tunnel_config */ \
1934 NULL, /* build header */ \
1935 NULL, /* push header */ \
1936 NULL, /* pop header */ \
1937 netdev_dpdk_get_numa_id, /* get_numa_id */ \
1938 MULTIQ, /* set_multiq */ \
1939 \
1940 SEND, /* send */ \
1941 NULL, /* send_wait */ \
1942 \
1943 netdev_dpdk_set_etheraddr, \
1944 netdev_dpdk_get_etheraddr, \
1945 netdev_dpdk_get_mtu, \
1946 netdev_dpdk_set_mtu, \
1947 netdev_dpdk_get_ifindex, \
1948 GET_CARRIER, \
1949 netdev_dpdk_get_carrier_resets, \
1950 netdev_dpdk_set_miimon, \
1951 GET_STATS, \
1952 GET_FEATURES, \
1953 NULL, /* set_advertisements */ \
1954 \
1955 NULL, /* set_policing */ \
1956 NULL, /* get_qos_types */ \
1957 NULL, /* get_qos_capabilities */ \
1958 NULL, /* get_qos */ \
1959 NULL, /* set_qos */ \
1960 NULL, /* get_queue */ \
1961 NULL, /* set_queue */ \
1962 NULL, /* delete_queue */ \
1963 NULL, /* get_queue_stats */ \
1964 NULL, /* queue_dump_start */ \
1965 NULL, /* queue_dump_next */ \
1966 NULL, /* queue_dump_done */ \
1967 NULL, /* dump_queue_stats */ \
1968 \
1969 NULL, /* get_in4 */ \
1970 NULL, /* set_in4 */ \
1971 NULL, /* get_in6 */ \
1972 NULL, /* add_router */ \
1973 NULL, /* get_next_hop */ \
1974 GET_STATUS, \
1975 NULL, /* arp_lookup */ \
1976 \
1977 netdev_dpdk_update_flags, \
1978 \
1979 netdev_dpdk_rxq_alloc, \
1980 netdev_dpdk_rxq_construct, \
1981 netdev_dpdk_rxq_destruct, \
1982 netdev_dpdk_rxq_dealloc, \
1983 RXQ_RECV, \
1984 NULL, /* rx_wait */ \
1985 NULL, /* rxq_drain */ \
1986 }
1987
1988 static int
1989 process_vhost_flags(char *flag, char *default_val, int size,
1990 char **argv, char **new_val)
1991 {
1992 int changed = 0;
1993
1994 /* Depending on which version of vhost is in use, process the vhost-specific
1995 * flag if it is provided on the vswitchd command line, otherwise resort to
1996 * a default value.
1997 *
1998 * For vhost-user: Process "-cuse_dev_name" to set the custom location of
1999 * the vhost-user socket(s).
2000 * For vhost-cuse: Process "-vhost_sock_dir" to set the custom name of the
2001 * vhost-cuse character device.
2002 */
2003 if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
2004 changed = 1;
2005 *new_val = strdup(argv[2]);
2006 VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
2007 } else {
2008 VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
2009 *new_val = default_val;
2010 }
2011
2012 return changed;
2013 }
2014
2015 int
2016 dpdk_init(int argc, char **argv)
2017 {
2018 int result;
2019 int base = 0;
2020 char *pragram_name = argv[0];
2021
2022 if (argc < 2 || strcmp(argv[1], "--dpdk"))
2023 return 0;
2024
2025 /* Remove the --dpdk argument from arg list.*/
2026 argc--;
2027 argv++;
2028
2029 #ifdef VHOST_CUSE
2030 if (process_vhost_flags("-cuse_dev_name", strdup("vhost-net"),
2031 PATH_MAX, argv, &cuse_dev_name)) {
2032 #else
2033 if (process_vhost_flags("-vhost_sock_dir", strdup(ovs_rundir()),
2034 NAME_MAX, argv, &vhost_sock_dir)) {
2035 struct stat s;
2036 int err;
2037
2038 err = stat(vhost_sock_dir, &s);
2039 if (err) {
2040 VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
2041 vhost_sock_dir);
2042 return err;
2043 }
2044 #endif
2045 /* Remove the vhost flag configuration parameters from the argument
2046 * list, so that the correct elements are passed to the DPDK
2047 * initialization function
2048 */
2049 argc -= 2;
2050 argv += 2; /* Increment by two to bypass the vhost flag arguments */
2051 base = 2;
2052 }
2053
2054 /* Keep the program name argument as this is needed for call to
2055 * rte_eal_init()
2056 */
2057 argv[0] = pragram_name;
2058
2059 /* Make sure things are initialized ... */
2060 result = rte_eal_init(argc, argv);
2061 if (result < 0) {
2062 ovs_abort(result, "Cannot init EAL");
2063 }
2064
2065 rte_memzone_dump(stdout);
2066 rte_eal_init_ret = 0;
2067
2068 if (argc > result) {
2069 argv[result] = argv[0];
2070 }
2071
2072 /* We are called from the main thread here */
2073 RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
2074
2075 return result + 1 + base;
2076 }
2077
2078 static const struct netdev_class dpdk_class =
2079 NETDEV_DPDK_CLASS(
2080 "dpdk",
2081 NULL,
2082 netdev_dpdk_construct,
2083 netdev_dpdk_destruct,
2084 netdev_dpdk_set_multiq,
2085 netdev_dpdk_eth_send,
2086 netdev_dpdk_get_carrier,
2087 netdev_dpdk_get_stats,
2088 netdev_dpdk_get_features,
2089 netdev_dpdk_get_status,
2090 netdev_dpdk_rxq_recv);
2091
2092 static const struct netdev_class dpdk_ring_class =
2093 NETDEV_DPDK_CLASS(
2094 "dpdkr",
2095 NULL,
2096 netdev_dpdk_ring_construct,
2097 netdev_dpdk_destruct,
2098 netdev_dpdk_set_multiq,
2099 netdev_dpdk_ring_send,
2100 netdev_dpdk_get_carrier,
2101 netdev_dpdk_get_stats,
2102 netdev_dpdk_get_features,
2103 netdev_dpdk_get_status,
2104 netdev_dpdk_rxq_recv);
2105
2106 static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
2107 NETDEV_DPDK_CLASS(
2108 "dpdkvhostcuse",
2109 dpdk_vhost_cuse_class_init,
2110 netdev_dpdk_vhost_cuse_construct,
2111 netdev_dpdk_vhost_destruct,
2112 netdev_dpdk_vhost_set_multiq,
2113 netdev_dpdk_vhost_send,
2114 netdev_dpdk_vhost_get_carrier,
2115 netdev_dpdk_vhost_get_stats,
2116 NULL,
2117 NULL,
2118 netdev_dpdk_vhost_rxq_recv);
2119
2120 static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
2121 NETDEV_DPDK_CLASS(
2122 "dpdkvhostuser",
2123 dpdk_vhost_user_class_init,
2124 netdev_dpdk_vhost_user_construct,
2125 netdev_dpdk_vhost_destruct,
2126 netdev_dpdk_vhost_set_multiq,
2127 netdev_dpdk_vhost_send,
2128 netdev_dpdk_vhost_get_carrier,
2129 netdev_dpdk_vhost_get_stats,
2130 NULL,
2131 NULL,
2132 netdev_dpdk_vhost_rxq_recv);
2133
2134 void
2135 netdev_dpdk_register(void)
2136 {
2137 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
2138
2139 if (rte_eal_init_ret) {
2140 return;
2141 }
2142
2143 if (ovsthread_once_start(&once)) {
2144 dpdk_common_init();
2145 netdev_register_provider(&dpdk_class);
2146 netdev_register_provider(&dpdk_ring_class);
2147 #ifdef VHOST_CUSE
2148 netdev_register_provider(&dpdk_vhost_cuse_class);
2149 #else
2150 netdev_register_provider(&dpdk_vhost_user_class);
2151 #endif
2152 ovsthread_once_done(&once);
2153 }
2154 }
2155
2156 int
2157 pmd_thread_setaffinity_cpu(unsigned cpu)
2158 {
2159 cpu_set_t cpuset;
2160 int err;
2161
2162 CPU_ZERO(&cpuset);
2163 CPU_SET(cpu, &cpuset);
2164 err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
2165 if (err) {
2166 VLOG_ERR("Thread affinity error %d",err);
2167 return err;
2168 }
2169 /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
2170 ovs_assert(cpu != NON_PMD_CORE_ID);
2171 RTE_PER_LCORE(_lcore_id) = cpu;
2172
2173 return 0;
2174 }
2175
2176 static bool
2177 thread_is_pmd(void)
2178 {
2179 return rte_lcore_id() != NON_PMD_CORE_ID;
2180 }