netdev-dpdk: Fix dpdk_watchdog failure to quiesce.
1 /*
2 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18
19 #include <string.h>
20 #include <signal.h>
21 #include <stdlib.h>
22 #include <pthread.h>
24 #include <errno.h>
25 #include <sched.h>
27 #include <unistd.h>
28 #include <sys/stat.h>
29 #include <stdio.h>
30 #include <sys/types.h>
32
33 #include "dirs.h"
34 #include "dp-packet.h"
35 #include "dpif-netdev.h"
36 #include "fatal-signal.h"
37 #include "list.h"
38 #include "netdev-dpdk.h"
39 #include "netdev-provider.h"
40 #include "netdev-vport.h"
41 #include "odp-util.h"
42 #include "ofp-print.h"
43 #include "ovs-numa.h"
44 #include "ovs-thread.h"
45 #include "ovs-rcu.h"
46 #include "packets.h"
47 #include "shash.h"
48 #include "sset.h"
49 #include "unaligned.h"
50 #include "timeval.h"
51 #include "unixctl.h"
52 #include "openvswitch/vlog.h"
53
54 #include "rte_config.h"
55 #include "rte_mbuf.h"
56 #include "rte_virtio_net.h"
57
58 VLOG_DEFINE_THIS_MODULE(dpdk);
59 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
60
61 #define DPDK_PORT_WATCHDOG_INTERVAL 5
62
63 #define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
64 #define OVS_VPORT_DPDK "ovs_dpdk"
65
66 /*
67  * We need to reserve extra space in the mbufs so that we can align their
68  * DMA addresses to 4KB.
69  * The minimum mbuf size is bounded below to avoid scatter behaviour and the
70  * resulting drop in performance for the standard Ethernet MTU.
71  */
72 #define MTU_TO_MAX_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
73 #define MBUF_SIZE_MTU(mtu) (MTU_TO_MAX_LEN(mtu) \
74 + sizeof(struct dp_packet) \
75 + RTE_PKTMBUF_HEADROOM)
76 #define MBUF_SIZE_DRIVER (2048 \
77 + sizeof (struct rte_mbuf) \
78 + RTE_PKTMBUF_HEADROOM)
79 #define MBUF_SIZE(mtu) MAX(MBUF_SIZE_MTU(mtu), MBUF_SIZE_DRIVER)
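/* For example, with the standard 1500-byte Ethernet MTU,
 * MTU_TO_MAX_LEN(1500) = 1500 + 14 (Ethernet header) + 4 (CRC) = 1518 bytes.
 * MBUF_SIZE_MTU() adds the embedded struct dp_packet and RTE_PKTMBUF_HEADROOM
 * on top of that, and MBUF_SIZE() takes the larger of this and
 * MBUF_SIZE_DRIVER. */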
80
81 /* Max and min number of packets in the mempool. OVS tries to allocate a
82 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
83 * enough hugepages) we keep halving the number until the allocation succeeds
84 * or we reach MIN_NB_MBUF */
85
86 #define MAX_NB_MBUF (4096 * 64)
87 #define MIN_NB_MBUF (4096 * 4)
88 #define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE
89
90 /* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
91 BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);
92
93 /* The smallest possible NB_MBUF that we're going to try should be a multiple
94 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
95 BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
96 % MP_CACHE_SZ == 0);
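/* With the values above, MAX_NB_MBUF = 262144 and MIN_NB_MBUF = 16384, so the
 * allocation can be halved at most four times and the smallest attempt is
 * 16384 mbufs, which must still be a multiple of MP_CACHE_SZ (DPDK's
 * RTE_MEMPOOL_CACHE_MAX_SIZE, 512 at the time of writing). */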
97
98 #define SOCKET0 0
99
100 #define NIC_PORT_RX_Q_SIZE 2048 /* Size of Physical NIC RX Queue, Max (n+32<=4096)*/
101 #define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue, Max (n+32<=4096)*/
102
103 static char *cuse_dev_name = NULL;    /* Name of the vhost-cuse character device. */
104 static char *vhost_sock_dir = NULL; /* Location of vhost-user sockets */
105
106 /*
107  * Maximum amount of time in microseconds to retry enqueueing to vhost.
108 */
109 #define VHOST_ENQ_RETRY_USECS 100
110
111 static const struct rte_eth_conf port_conf = {
112 .rxmode = {
113 .mq_mode = ETH_MQ_RX_RSS,
114 .split_hdr_size = 0,
115 .header_split = 0, /* Header Split disabled */
116 .hw_ip_checksum = 0, /* IP checksum offload disabled */
117 .hw_vlan_filter = 0, /* VLAN filtering disabled */
118 .jumbo_frame = 0, /* Jumbo Frame Support disabled */
119 .hw_strip_crc = 0,
120 },
121 .rx_adv_conf = {
122 .rss_conf = {
123 .rss_key = NULL,
124 .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
125 },
126 },
127 .txmode = {
128 .mq_mode = ETH_MQ_TX_NONE,
129 },
130 };
131
132 enum { MAX_TX_QUEUE_LEN = 384 };
133 enum { DPDK_RING_SIZE = 256 };
134 BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
135 enum { DRAIN_TSC = 200000ULL };
136
137 enum dpdk_dev_type {
138 DPDK_DEV_ETH = 0,
139 DPDK_DEV_VHOST = 1,
140 };
141
142 static int rte_eal_init_ret = ENODEV;
143
144 static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
145
146 /* Contains all 'struct dpdk_dev's. */
147 static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
148 = OVS_LIST_INITIALIZER(&dpdk_list);
149
150 static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
151 = OVS_LIST_INITIALIZER(&dpdk_mp_list);
152
153 /* This mutex must be used by non-pmd threads when allocating or freeing
154  * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
155  * use mempools, a non-pmd thread should hold this mutex while calling them. */
156 static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
157
158 struct dpdk_mp {
159 struct rte_mempool *mp;
160 int mtu;
161 int socket_id;
162 int refcount;
163 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
164 };
165
166 /* There should be one 'struct dpdk_tx_queue' created for
167 * each cpu core. */
168 struct dpdk_tx_queue {
169     bool flush_tx;                /* Set to true to flush queue every time */
170 /* pkts are queued. */
171 int count;
172 rte_spinlock_t tx_lock; /* Protects the members and the NIC queue
173 * from concurrent access. It is used only
174 * if the queue is shared among different
175 * pmd threads (see 'txq_needs_locking'). */
176 uint64_t tsc;
177 struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
178 };
179
180 /* DPDK has no way to remove dpdk ring ethernet devices,
181    so we have to keep them around once they've been created.
182 */
183
184 static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
185 = OVS_LIST_INITIALIZER(&dpdk_ring_list);
186
187 struct dpdk_ring {
188 /* For the client rings */
189 struct rte_ring *cring_tx;
190 struct rte_ring *cring_rx;
191     int user_port_id; /* User-given port number, parsed from port name */
192 int eth_port_id; /* ethernet device port id */
193 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
194 };
195
196 struct netdev_dpdk {
197 struct netdev up;
198 int port_id;
199 int max_packet_len;
200 enum dpdk_dev_type type;
201
202 struct dpdk_tx_queue *tx_q;
203
204 struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
205
206 struct dpdk_mp *dpdk_mp;
207 int mtu;
208 int socket_id;
209 int buf_size;
210 struct netdev_stats stats;
211 /* Protects stats */
212 rte_spinlock_t stats_lock;
213
214 struct eth_addr hwaddr;
215 enum netdev_flags flags;
216
217 struct rte_eth_link link;
218 int link_reset_cnt;
219
220     /* The user might request more txqs than the NIC has. We remap the
221      * requested queues ('up.n_txq') onto the available ones ('real_n_txq').
222      * If the numbers match, 'txq_needs_locking' is false, otherwise it is
223      * true and we take a spinlock on transmission. */
224 int real_n_txq;
225 int real_n_rxq;
226 bool txq_needs_locking;
227
228 /* virtio-net structure for vhost device */
229 OVSRCU_TYPE(struct virtio_net *) virtio_dev;
230
231 /* Identifier used to distinguish vhost devices from each other */
232 char vhost_id[PATH_MAX];
233
234 /* In dpdk_list. */
235 struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
236 };
237
238 struct netdev_rxq_dpdk {
239 struct netdev_rxq up;
240 int port_id;
241 };
242
243 static bool dpdk_thread_is_pmd(void);
244
245 static int netdev_dpdk_construct(struct netdev *);
246
247 struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
248
249 static bool
250 is_dpdk_class(const struct netdev_class *class)
251 {
252 return class->construct == netdev_dpdk_construct;
253 }
254
255 /* XXX: use dpdk malloc for entire OVS. In fact, huge pages should be used
256  * for all the other segments as well: data, bss and text. */
257
258 static void *
259 dpdk_rte_mzalloc(size_t sz)
260 {
261 void *ptr;
262
263 ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
264 if (ptr == NULL) {
265 out_of_memory();
266 }
267 return ptr;
268 }
269
270 /* XXX this function should be called only by pmd threads (or by non-pmd
271  * threads holding the nonpmd_mempool_mutex). */
272 void
273 free_dpdk_buf(struct dp_packet *p)
274 {
275 struct rte_mbuf *pkt = (struct rte_mbuf *) p;
276
277 rte_pktmbuf_free_seg(pkt);
278 }
279
280 static void
281 __rte_pktmbuf_init(struct rte_mempool *mp,
282 void *opaque_arg OVS_UNUSED,
283 void *_m,
284 unsigned i OVS_UNUSED)
285 {
286 struct rte_mbuf *m = _m;
287 uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);
288
289 RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));
290
291 memset(m, 0, mp->elt_size);
292
293     /* start of buffer is just after the dp_packet structure (which embeds the mbuf) */
294 m->buf_addr = (char *)m + sizeof(struct dp_packet);
295 m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
296 sizeof(struct dp_packet);
297 m->buf_len = (uint16_t)buf_len;
298
299 /* keep some headroom between start of buffer and data */
300 m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
301
302 /* init some constant fields */
303 m->pool = mp;
304 m->nb_segs = 1;
305 m->port = 0xff;
306 }
307
308 static void
309 ovs_rte_pktmbuf_init(struct rte_mempool *mp,
310 void *opaque_arg OVS_UNUSED,
311 void *_m,
312 unsigned i OVS_UNUSED)
313 {
314 struct rte_mbuf *m = _m;
315
316 __rte_pktmbuf_init(mp, opaque_arg, _m, i);
317
318 dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
319 }
320
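/* Returns an existing mempool for the given NUMA socket and MTU (bumping its
 * refcount), or creates a new one, halving the mbuf count on ENOMEM until
 * MIN_NB_MBUF is reached.  Returns NULL on failure.  Caller must hold
 * 'dpdk_mutex'. */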
321 static struct dpdk_mp *
322 dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
323 {
324 struct dpdk_mp *dmp = NULL;
325 char mp_name[RTE_MEMPOOL_NAMESIZE];
326 unsigned mp_size;
327
328 LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
329 if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
330 dmp->refcount++;
331 return dmp;
332 }
333 }
334
335 dmp = dpdk_rte_mzalloc(sizeof *dmp);
336 dmp->socket_id = socket_id;
337 dmp->mtu = mtu;
338 dmp->refcount = 1;
339
340 mp_size = MAX_NB_MBUF;
341 do {
342 if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
343 dmp->mtu, dmp->socket_id, mp_size) < 0) {
344 return NULL;
345 }
346
347 dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
348 MP_CACHE_SZ,
349 sizeof(struct rte_pktmbuf_pool_private),
350 rte_pktmbuf_pool_init, NULL,
351 ovs_rte_pktmbuf_init, NULL,
352 socket_id, 0);
353 } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);
354
355 if (dmp->mp == NULL) {
356 return NULL;
357 } else {
358 VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size );
359 }
360
361 list_push_back(&dpdk_mp_list, &dmp->list_node);
362 return dmp;
363 }
364
365 static void
366 dpdk_mp_put(struct dpdk_mp *dmp)
367 {
368
369 if (!dmp) {
370 return;
371 }
372
373 dmp->refcount--;
374 ovs_assert(dmp->refcount >= 0);
375
376 #if 0
377 /* I could not find any API to destroy mp. */
378 if (dmp->refcount == 0) {
379 list_delete(dmp->list_node);
380 /* destroy mp-pool. */
381 }
382 #endif
383 }
384
385 static void
386 check_link_status(struct netdev_dpdk *dev)
387 {
388 struct rte_eth_link link;
389
390 rte_eth_link_get_nowait(dev->port_id, &link);
391
392 if (dev->link.link_status != link.link_status) {
393 netdev_change_seq_changed(&dev->up);
394
395 dev->link_reset_cnt++;
396 dev->link = link;
397 if (dev->link.link_status) {
398 VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
399 dev->port_id, (unsigned)dev->link.link_speed,
400 (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
401 ("full-duplex") : ("half-duplex"));
402 } else {
403 VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
404 }
405 }
406 }
407
408 static void *
409 dpdk_watchdog(void *dummy OVS_UNUSED)
410 {
411 struct netdev_dpdk *dev;
412
413 pthread_detach(pthread_self());
414
415 for (;;) {
416 ovs_mutex_lock(&dpdk_mutex);
417 LIST_FOR_EACH (dev, list_node, &dpdk_list) {
418 ovs_mutex_lock(&dev->mutex);
419 check_link_status(dev);
420 ovs_mutex_unlock(&dev->mutex);
421 }
422 ovs_mutex_unlock(&dpdk_mutex);
423 xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
424 }
425
426 return NULL;
427 }
428
429 static int
430 dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
431 {
432 int diag = 0;
433 int i;
434
435 /* A device may report more queues than it makes available (this has
436 * been observed for Intel xl710, which reserves some of them for
437 * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
438 * available. When this happens we can retry the configuration
439      * and request fewer queues. */
440 while (n_rxq && n_txq) {
441 if (diag) {
442 VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
443 }
444
445 diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
446 if (diag) {
447 break;
448 }
449
450 for (i = 0; i < n_txq; i++) {
451 diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
452 dev->socket_id, NULL);
453 if (diag) {
454 VLOG_INFO("Interface %s txq(%d) setup error: %s",
455 dev->up.name, i, rte_strerror(-diag));
456 break;
457 }
458 }
459
460 if (i != n_txq) {
461             /* Retry with fewer tx queues */
462 n_txq = i;
463 continue;
464 }
465
466 for (i = 0; i < n_rxq; i++) {
467 diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
468 dev->socket_id, NULL,
469 dev->dpdk_mp->mp);
470 if (diag) {
471 VLOG_INFO("Interface %s rxq(%d) setup error: %s",
472 dev->up.name, i, rte_strerror(-diag));
473 break;
474 }
475 }
476
477 if (i != n_rxq) {
478             /* Retry with fewer rx queues */
479 n_rxq = i;
480 continue;
481 }
482
483 dev->up.n_rxq = n_rxq;
484 dev->real_n_txq = n_txq;
485
486 return 0;
487 }
488
489 return diag;
490 }
491
492
493 static int
494 dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
495 {
496 struct rte_pktmbuf_pool_private *mbp_priv;
497 struct rte_eth_dev_info info;
498 struct ether_addr eth_addr;
499 int diag;
500 int n_rxq, n_txq;
501
502 if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
503 return ENODEV;
504 }
505
506 rte_eth_dev_info_get(dev->port_id, &info);
507
508 n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
509 n_txq = MIN(info.max_tx_queues, dev->up.n_txq);
510
511 diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
512 if (diag) {
513 VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
514 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
515 return -diag;
516 }
517
518 diag = rte_eth_dev_start(dev->port_id);
519 if (diag) {
520 VLOG_ERR("Interface %s start error: %s", dev->up.name,
521 rte_strerror(-diag));
522 return -diag;
523 }
524
525 rte_eth_promiscuous_enable(dev->port_id);
526 rte_eth_allmulticast_enable(dev->port_id);
527
528 memset(&eth_addr, 0x0, sizeof(eth_addr));
529 rte_eth_macaddr_get(dev->port_id, &eth_addr);
530 VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
531 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));
532
533 memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
534 rte_eth_link_get_nowait(dev->port_id, &dev->link);
535
536 mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
537 dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
538
539 dev->flags = NETDEV_UP | NETDEV_PROMISC;
540 return 0;
541 }
542
543 static struct netdev_dpdk *
544 netdev_dpdk_cast(const struct netdev *netdev)
545 {
546 return CONTAINER_OF(netdev, struct netdev_dpdk, up);
547 }
548
549 static struct netdev *
550 netdev_dpdk_alloc(void)
551 {
552 struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
553 return &netdev->up;
554 }
555
556 static void
557 netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
558 {
559 unsigned i;
560
561 netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
562 for (i = 0; i < n_txqs; i++) {
563 int numa_id = ovs_numa_get_numa_id(i);
564
565 if (!netdev->txq_needs_locking) {
566 /* Each index is considered as a cpu core id, since there should
567 * be one tx queue for each cpu core. If the corresponding core
568              * is not on the same numa node as 'netdev', set
569 * 'flush_tx'. */
570 netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
571 } else {
572 /* Queues are shared among CPUs. Always flush */
573 netdev->tx_q[i].flush_tx = true;
574 }
575 rte_spinlock_init(&netdev->tx_q[i].tx_lock);
576 }
577 }
578
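/* Common initialization for physical ('dpdk') and vhost ports: picks the NUMA
 * socket, grabs a mempool for the default MTU and, for DPDK_DEV_ETH devices,
 * allocates the tx queues and starts the NIC.  Called with 'dpdk_mutex' held. */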
579 static int
580 netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
581 enum dpdk_dev_type type)
582 OVS_REQUIRES(dpdk_mutex)
583 {
584 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
585 int sid;
586 int err = 0;
587
588 ovs_mutex_init(&netdev->mutex);
589 ovs_mutex_lock(&netdev->mutex);
590
591 rte_spinlock_init(&netdev->stats_lock);
592
593     /* If the 'sid' is negative, it means that the kernel failed
594 * to obtain the pci numa info. In that situation, always
595 * use 'SOCKET0'. */
596 if (type == DPDK_DEV_ETH) {
597 sid = rte_eth_dev_socket_id(port_no);
598 } else {
599 sid = rte_lcore_to_socket_id(rte_get_master_lcore());
600 }
601
602 netdev->socket_id = sid < 0 ? SOCKET0 : sid;
603 netdev->port_id = port_no;
604 netdev->type = type;
605 netdev->flags = 0;
606 netdev->mtu = ETHER_MTU;
607 netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
608
609 netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
610 if (!netdev->dpdk_mp) {
611 err = ENOMEM;
612 goto unlock;
613 }
614
615 netdev_->n_txq = NR_QUEUE;
616 netdev_->n_rxq = NR_QUEUE;
617 netdev_->requested_n_rxq = NR_QUEUE;
618 netdev->real_n_txq = NR_QUEUE;
619
620 if (type == DPDK_DEV_ETH) {
621 netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
622 err = dpdk_eth_dev_init(netdev);
623 if (err) {
624 goto unlock;
625 }
626 }
627
628 list_push_back(&dpdk_list, &netdev->list_node);
629
630 unlock:
631 if (err) {
632 rte_free(netdev->tx_q);
633 }
634 ovs_mutex_unlock(&netdev->mutex);
635 return err;
636 }
637
638 static int
639 dpdk_dev_parse_name(const char dev_name[], const char prefix[],
640 unsigned int *port_no)
641 {
642 const char *cport;
643
644 if (strncmp(dev_name, prefix, strlen(prefix))) {
645 return ENODEV;
646 }
647
648 cport = dev_name + strlen(prefix);
649 *port_no = strtol(cport, NULL, 0); /* string must be null terminated */
650 return 0;
651 }
652
653 static int
654 vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
655 {
656 if (rte_eal_init_ret) {
657 return rte_eal_init_ret;
658 }
659
660 return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
661 }
662
663 static int
664 netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
665 {
666 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
667 int err;
668
669 ovs_mutex_lock(&dpdk_mutex);
670 strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
671 err = vhost_construct_helper(netdev_);
672 ovs_mutex_unlock(&dpdk_mutex);
673 return err;
674 }
675
676 static int
677 netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
678 {
679 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
680 int err;
681
682 ovs_mutex_lock(&dpdk_mutex);
683 /* Take the name of the vhost-user port and append it to the location where
684 * the socket is to be created, then register the socket.
685 */
686 snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
687 vhost_sock_dir, netdev_->name);
688 err = rte_vhost_driver_register(netdev->vhost_id);
689 if (err) {
690 VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
691 netdev->vhost_id);
692 } else {
693 fatal_signal_add_file_to_unlink(netdev->vhost_id);
694 VLOG_INFO("Socket %s created for vhost-user port %s\n",
695 netdev->vhost_id, netdev_->name);
696 err = vhost_construct_helper(netdev_);
697 }
698
699 ovs_mutex_unlock(&dpdk_mutex);
700 return err;
701 }
702
703 static int
704 netdev_dpdk_construct(struct netdev *netdev)
705 {
706 unsigned int port_no;
707 int err;
708
709 if (rte_eal_init_ret) {
710 return rte_eal_init_ret;
711 }
712
713 /* Names always start with "dpdk" */
714 err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
715 if (err) {
716 return err;
717 }
718
719 ovs_mutex_lock(&dpdk_mutex);
720 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
721 ovs_mutex_unlock(&dpdk_mutex);
722 return err;
723 }
724
725 static void
726 netdev_dpdk_destruct(struct netdev *netdev_)
727 {
728 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
729
730 ovs_mutex_lock(&dev->mutex);
731 rte_eth_dev_stop(dev->port_id);
732 ovs_mutex_unlock(&dev->mutex);
733
734 ovs_mutex_lock(&dpdk_mutex);
735 rte_free(dev->tx_q);
736 list_remove(&dev->list_node);
737 dpdk_mp_put(dev->dpdk_mp);
738 ovs_mutex_unlock(&dpdk_mutex);
739 }
740
741 static void
742 netdev_dpdk_vhost_destruct(struct netdev *netdev_)
743 {
744 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
745
746 /* Can't remove a port while a guest is attached to it. */
747 if (netdev_dpdk_get_virtio(dev) != NULL) {
748 VLOG_ERR("Can not remove port, vhost device still attached");
749 return;
750 }
751
752 if (rte_vhost_driver_unregister(dev->vhost_id)) {
753 VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
754 } else {
755 fatal_signal_remove_file_to_unlink(dev->vhost_id);
756 }
757
758 ovs_mutex_lock(&dpdk_mutex);
759 list_remove(&dev->list_node);
760 dpdk_mp_put(dev->dpdk_mp);
761 ovs_mutex_unlock(&dpdk_mutex);
762 }
763
764 static void
765 netdev_dpdk_dealloc(struct netdev *netdev_)
766 {
767 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
768
769 rte_free(netdev);
770 }
771
772 static int
773 netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
774 {
775 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
776
777 ovs_mutex_lock(&dev->mutex);
778
779 smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
780 smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
781 smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
782 smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
783 ovs_mutex_unlock(&dev->mutex);
784
785 return 0;
786 }
787
788 static int
789 netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
790 {
791 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
792
793 ovs_mutex_lock(&dev->mutex);
794 netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
795 netdev->requested_n_rxq), 1);
796 netdev_change_seq_changed(netdev);
797 ovs_mutex_unlock(&dev->mutex);
798
799 return 0;
800 }
801
802 static int
803 netdev_dpdk_get_numa_id(const struct netdev *netdev_)
804 {
805 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
806
807 return netdev->socket_id;
808 }
809
810 /* Sets the number of tx queues and rx queues for the dpdk interface.
811  * If the configuration fails, it does not try to restore the old
812  * configuration and simply returns the error. */
813 static int
814 netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
815 unsigned int n_rxq)
816 {
817 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
818 int err = 0;
819 int old_rxq, old_txq;
820
821 if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
822 return err;
823 }
824
825 ovs_mutex_lock(&dpdk_mutex);
826 ovs_mutex_lock(&netdev->mutex);
827
828 rte_eth_dev_stop(netdev->port_id);
829
830 old_txq = netdev->up.n_txq;
831 old_rxq = netdev->up.n_rxq;
832 netdev->up.n_txq = n_txq;
833 netdev->up.n_rxq = n_rxq;
834
835 rte_free(netdev->tx_q);
836 err = dpdk_eth_dev_init(netdev);
837 netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
838 if (err) {
839 /* If there has been an error, it means that the requested queues
840 * have not been created. Restore the old numbers. */
841 netdev->up.n_txq = old_txq;
842 netdev->up.n_rxq = old_rxq;
843 }
844
845 netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;
846
847 ovs_mutex_unlock(&netdev->mutex);
848 ovs_mutex_unlock(&dpdk_mutex);
849
850 return err;
851 }
852
853 static int
854 netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev_, unsigned int n_txq,
855 unsigned int n_rxq)
856 {
857 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
858 int err = 0;
859
860 if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
861 return err;
862 }
863
864 ovs_mutex_lock(&dpdk_mutex);
865 ovs_mutex_lock(&netdev->mutex);
866
867 netdev->up.n_txq = n_txq;
868 netdev->real_n_txq = 1;
869 netdev->up.n_rxq = 1;
870 netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;
871
872 ovs_mutex_unlock(&netdev->mutex);
873 ovs_mutex_unlock(&dpdk_mutex);
874
875 return err;
876 }
877
878 static int
879 netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
880 unsigned int n_rxq)
881 {
882 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
883 int err = 0;
884
885 if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
886 return err;
887 }
888
889 ovs_mutex_lock(&dpdk_mutex);
890 ovs_mutex_lock(&netdev->mutex);
891
892 rte_free(netdev->tx_q);
893 netdev->up.n_txq = n_txq;
894 netdev->up.n_rxq = n_rxq;
895 netdev_dpdk_alloc_txq(netdev, netdev->up.n_txq);
896
897 ovs_mutex_unlock(&netdev->mutex);
898 ovs_mutex_unlock(&dpdk_mutex);
899
900 return err;
901 }
902
903 static struct netdev_rxq *
904 netdev_dpdk_rxq_alloc(void)
905 {
906 struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);
907
908 return &rx->up;
909 }
910
911 static struct netdev_rxq_dpdk *
912 netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
913 {
914 return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
915 }
916
917 static int
918 netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
919 {
920 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
921 struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);
922
923 ovs_mutex_lock(&netdev->mutex);
924 rx->port_id = netdev->port_id;
925 ovs_mutex_unlock(&netdev->mutex);
926
927 return 0;
928 }
929
930 static void
931 netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
932 {
933 }
934
935 static void
936 netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
937 {
938 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
939
940 rte_free(rx);
941 }
942
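/* Transmits the packets queued on tx queue 'qid'.  Any mbufs that the NIC
 * does not accept are freed and counted as tx_dropped.  The caller is expected
 * to have exclusive access to the queue (per-core queue or 'tx_lock' held). */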
943 static inline void
944 dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
945 {
946 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
947 uint32_t nb_tx = 0;
948
949 while (nb_tx != txq->count) {
950 uint32_t ret;
951
952 ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
953 txq->count - nb_tx);
954 if (!ret) {
955 break;
956 }
957
958 nb_tx += ret;
959 }
960
961 if (OVS_UNLIKELY(nb_tx != txq->count)) {
962         /* Free the buffers we couldn't transmit, one at a time (each
963          * packet could come from a different mempool). */
964 int i;
965
966 for (i = nb_tx; i < txq->count; i++) {
967 rte_pktmbuf_free_seg(txq->burst_pkts[i]);
968 }
969 rte_spinlock_lock(&dev->stats_lock);
970 dev->stats.tx_dropped += txq->count-nb_tx;
971 rte_spinlock_unlock(&dev->stats_lock);
972 }
973
974 txq->count = 0;
975 txq->tsc = rte_get_timer_cycles();
976 }
977
978 static inline void
979 dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
980 {
981 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
982
983 if (txq->count == 0) {
984 return;
985 }
986 dpdk_queue_flush__(dev, qid);
987 }
988
989 static bool
990 is_vhost_running(struct virtio_net *dev)
991 {
992 return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
993 }
994
995 static inline void
996 netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
997 struct dp_packet **packets, int count)
998 {
999 int i;
1000 struct dp_packet *packet;
1001
1002 stats->rx_packets += count;
1003 for (i = 0; i < count; i++) {
1004 packet = packets[i];
1005
1006 if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
1007 /* This only protects the following multicast counting from
1008 * too short packets, but it does not stop the packet from
1009 * further processing. */
1010 stats->rx_errors++;
1011 stats->rx_length_errors++;
1012 continue;
1013 }
1014
1015 struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
1016 if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
1017 stats->multicast++;
1018 }
1019
1020 stats->rx_bytes += dp_packet_size(packet);
1021 }
1022 }
1023
1024 /*
1025  * The receive path for the vhost port is the TX path out of the guest.
1026 */
1027 static int
1028 netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
1029 struct dp_packet **packets, int *c)
1030 {
1031 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
1032 struct netdev *netdev = rx->up.netdev;
1033 struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
1034 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
1035 int qid = rxq_->queue_id;
1036 uint16_t nb_rx = 0;
1037
1038 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
1039 return EAGAIN;
1040 }
1041
1042 if (rxq_->queue_id >= vhost_dev->real_n_rxq) {
1043 return EOPNOTSUPP;
1044 }
1045
1046 nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
1047 vhost_dev->dpdk_mp->mp,
1048 (struct rte_mbuf **)packets,
1049 NETDEV_MAX_BURST);
1050 if (!nb_rx) {
1051 return EAGAIN;
1052 }
1053
1054 rte_spinlock_lock(&vhost_dev->stats_lock);
1055 netdev_dpdk_vhost_update_rx_counters(&vhost_dev->stats, packets, nb_rx);
1056 rte_spinlock_unlock(&vhost_dev->stats_lock);
1057
1058 *c = (int) nb_rx;
1059 return 0;
1060 }
1061
1062 static int
1063 netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
1064 int *c)
1065 {
1066 struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
1067 struct netdev *netdev = rx->up.netdev;
1068 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1069 int nb_rx;
1070
1071 /* There is only one tx queue for this core. Do not flush other
1072 * queues.
1073      * Do not flush a tx queue that is shared among CPUs,
1074      * since it is always flushed. */
1075 if (rxq_->queue_id == rte_lcore_id() &&
1076 OVS_LIKELY(!dev->txq_needs_locking)) {
1077 dpdk_queue_flush(dev, rxq_->queue_id);
1078 }
1079
1080 nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
1081 (struct rte_mbuf **) packets,
1082 NETDEV_MAX_BURST);
1083 if (!nb_rx) {
1084 return EAGAIN;
1085 }
1086
1087 *c = nb_rx;
1088
1089 return 0;
1090 }
1091
1092 static inline void
1093 netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
1094 struct dp_packet **packets,
1095 int attempted,
1096 int dropped)
1097 {
1098 int i;
1099 int sent = attempted - dropped;
1100
1101 stats->tx_packets += sent;
1102 stats->tx_dropped += dropped;
1103
1104 for (i = 0; i < sent; i++) {
1105 stats->tx_bytes += dp_packet_size(packets[i]);
1106 }
1107 }
1108
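/* Enqueues packets to the vhost device backing 'netdev'.  If the virtqueue is
 * full, retries for up to VHOST_ENQ_RETRY_USECS before dropping the remaining
 * packets; drops everything if no guest is attached.  When 'may_steal' is
 * true the packets are deleted after the attempt. */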
1109 static void
1110 __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
1111 struct dp_packet **pkts, int cnt,
1112 bool may_steal)
1113 {
1114 struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
1115 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
1116 struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
1117 unsigned int total_pkts = cnt;
1118 uint64_t start = 0;
1119
1120 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
1121 rte_spinlock_lock(&vhost_dev->stats_lock);
1122 vhost_dev->stats.tx_dropped+= cnt;
1123 rte_spinlock_unlock(&vhost_dev->stats_lock);
1124 goto out;
1125 }
1126
1127 if (vhost_dev->txq_needs_locking) {
1128 qid = qid % vhost_dev->real_n_txq;
1129 rte_spinlock_lock(&vhost_dev->tx_q[qid].tx_lock);
1130 }
1131
1132 do {
1133 int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
1134 unsigned int tx_pkts;
1135
1136 tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
1137 cur_pkts, cnt);
1138 if (OVS_LIKELY(tx_pkts)) {
1139 /* Packets have been sent.*/
1140 cnt -= tx_pkts;
1141 /* Prepare for possible next iteration.*/
1142 cur_pkts = &cur_pkts[tx_pkts];
1143 } else {
1144 uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
1145 unsigned int expired = 0;
1146
1147 if (!start) {
1148 start = rte_get_timer_cycles();
1149 }
1150
1151 /*
1152 * Unable to enqueue packets to vhost interface.
1153 * Check available entries before retrying.
1154 */
1155 while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
1156 if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
1157 expired = 1;
1158 break;
1159 }
1160 }
1161 if (expired) {
1162 /* break out of main loop. */
1163 break;
1164 }
1165 }
1166 } while (cnt);
1167
1168 if (vhost_dev->txq_needs_locking) {
1169 rte_spinlock_unlock(&vhost_dev->tx_q[qid].tx_lock);
1170 }
1171
1172 rte_spinlock_lock(&vhost_dev->stats_lock);
1173 netdev_dpdk_vhost_update_tx_counters(&vhost_dev->stats, pkts, total_pkts,
1174 cnt);
1175 rte_spinlock_unlock(&vhost_dev->stats_lock);
1176
1177 out:
1178 if (may_steal) {
1179 int i;
1180
1181 for (i = 0; i < total_pkts; i++) {
1182 dp_packet_delete(pkts[i]);
1183 }
1184 }
1185 }
1186
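/* Appends 'cnt' mbufs to tx queue 'qid', flushing whenever the queue becomes
 * full, 'flush_tx' is set, or more than DRAIN_TSC timer cycles have passed
 * since the last flush. */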
1187 inline static void
1188 dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
1189 struct rte_mbuf **pkts, int cnt)
1190 {
1191 struct dpdk_tx_queue *txq = &dev->tx_q[qid];
1192 uint64_t diff_tsc;
1193
1194 int i = 0;
1195
1196 while (i < cnt) {
1197 int freeslots = MAX_TX_QUEUE_LEN - txq->count;
1198 int tocopy = MIN(freeslots, cnt-i);
1199
1200 memcpy(&txq->burst_pkts[txq->count], &pkts[i],
1201 tocopy * sizeof (struct rte_mbuf *));
1202
1203 txq->count += tocopy;
1204 i += tocopy;
1205
1206 if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
1207 dpdk_queue_flush__(dev, qid);
1208 }
1209 diff_tsc = rte_get_timer_cycles() - txq->tsc;
1210 if (diff_tsc >= DRAIN_TSC) {
1211 dpdk_queue_flush__(dev, qid);
1212 }
1213 }
1214 }
1215
1216 /* Tx function.  Copies the packets into DPDK mbufs and transmits them. */
1217 static void
1218 dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
1219 int cnt)
1220 OVS_NO_THREAD_SAFETY_ANALYSIS
1221 {
1222 #if !defined(__CHECKER__) && !defined(_WIN32)
1223 const size_t PKT_ARRAY_SIZE = cnt;
1224 #else
1225 /* Sparse or MSVC doesn't like variable length array. */
1226 enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
1227 #endif
1228 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1229 struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
1230 int dropped = 0;
1231 int newcnt = 0;
1232 int i;
1233
1234     /* If we are on a non-pmd thread we have to use the mempool mutex, because
1235      * every non-pmd thread shares the same mempool cache. */
1236
1237 if (!dpdk_thread_is_pmd()) {
1238 ovs_mutex_lock(&nonpmd_mempool_mutex);
1239 }
1240
1241 for (i = 0; i < cnt; i++) {
1242 int size = dp_packet_size(pkts[i]);
1243
1244 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
1245 VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
1246 (int)size , dev->max_packet_len);
1247
1248 dropped++;
1249 continue;
1250 }
1251
1252 mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
1253
1254 if (!mbufs[newcnt]) {
1255 dropped += cnt - i;
1256 break;
1257 }
1258
1259 /* We have to do a copy for now */
1260 memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *), dp_packet_data(pkts[i]), size);
1261
1262 rte_pktmbuf_data_len(mbufs[newcnt]) = size;
1263 rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;
1264
1265 newcnt++;
1266 }
1267
1268 if (OVS_UNLIKELY(dropped)) {
1269 rte_spinlock_lock(&dev->stats_lock);
1270 dev->stats.tx_dropped += dropped;
1271 rte_spinlock_unlock(&dev->stats_lock);
1272 }
1273
1274 if (dev->type == DPDK_DEV_VHOST) {
1275 __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs, newcnt, true);
1276 } else {
1277 dpdk_queue_pkts(dev, qid, mbufs, newcnt);
1278 dpdk_queue_flush(dev, qid);
1279 }
1280
1281 if (!dpdk_thread_is_pmd()) {
1282 ovs_mutex_unlock(&nonpmd_mempool_mutex);
1283 }
1284 }
1285
1286 static int
1287 netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
1288 int cnt, bool may_steal)
1289 {
1290 if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
1291 int i;
1292
1293 dpdk_do_tx_copy(netdev, qid, pkts, cnt);
1294 if (may_steal) {
1295 for (i = 0; i < cnt; i++) {
1296 dp_packet_delete(pkts[i]);
1297 }
1298 }
1299 } else {
1300 __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
1301 }
1302 return 0;
1303 }
1304
1305 static inline void
1306 netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
1307 struct dp_packet **pkts, int cnt, bool may_steal)
1308 {
1309 int i;
1310
1311 if (OVS_UNLIKELY(dev->txq_needs_locking)) {
1312 qid = qid % dev->real_n_txq;
1313 rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
1314 }
1315
1316 if (OVS_UNLIKELY(!may_steal ||
1317 pkts[0]->source != DPBUF_DPDK)) {
1318 struct netdev *netdev = &dev->up;
1319
1320 dpdk_do_tx_copy(netdev, qid, pkts, cnt);
1321
1322 if (may_steal) {
1323 for (i = 0; i < cnt; i++) {
1324 dp_packet_delete(pkts[i]);
1325 }
1326 }
1327 } else {
1328 int next_tx_idx = 0;
1329 int dropped = 0;
1330
1331 for (i = 0; i < cnt; i++) {
1332 int size = dp_packet_size(pkts[i]);
1333
1334 if (OVS_UNLIKELY(size > dev->max_packet_len)) {
1335 if (next_tx_idx != i) {
1336 dpdk_queue_pkts(dev, qid,
1337 (struct rte_mbuf **)&pkts[next_tx_idx],
1338 i-next_tx_idx);
1339 }
1340
1341 VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
1342 (int)size , dev->max_packet_len);
1343
1344 dp_packet_delete(pkts[i]);
1345 dropped++;
1346 next_tx_idx = i + 1;
1347 }
1348 }
1349 if (next_tx_idx != cnt) {
1350 dpdk_queue_pkts(dev, qid,
1351 (struct rte_mbuf **)&pkts[next_tx_idx],
1352 cnt-next_tx_idx);
1353 }
1354
1355 if (OVS_UNLIKELY(dropped)) {
1356 rte_spinlock_lock(&dev->stats_lock);
1357 dev->stats.tx_dropped += dropped;
1358 rte_spinlock_unlock(&dev->stats_lock);
1359 }
1360 }
1361
1362 if (OVS_UNLIKELY(dev->txq_needs_locking)) {
1363 rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
1364 }
1365 }
1366
1367 static int
1368 netdev_dpdk_eth_send(struct netdev *netdev, int qid,
1369 struct dp_packet **pkts, int cnt, bool may_steal)
1370 {
1371 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1372
1373 netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
1374 return 0;
1375 }
1376
1377 static int
1378 netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
1379 {
1380 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1381
1382 ovs_mutex_lock(&dev->mutex);
1383 if (!eth_addr_equals(dev->hwaddr, mac)) {
1384 dev->hwaddr = mac;
1385 netdev_change_seq_changed(netdev);
1386 }
1387 ovs_mutex_unlock(&dev->mutex);
1388
1389 return 0;
1390 }
1391
1392 static int
1393 netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
1394 {
1395 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1396
1397 ovs_mutex_lock(&dev->mutex);
1398 *mac = dev->hwaddr;
1399 ovs_mutex_unlock(&dev->mutex);
1400
1401 return 0;
1402 }
1403
1404 static int
1405 netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
1406 {
1407 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1408
1409 ovs_mutex_lock(&dev->mutex);
1410 *mtup = dev->mtu;
1411 ovs_mutex_unlock(&dev->mutex);
1412
1413 return 0;
1414 }
1415
1416 static int
1417 netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
1418 {
1419 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1420 int old_mtu, err;
1421 struct dpdk_mp *old_mp;
1422 struct dpdk_mp *mp;
1423
1424 ovs_mutex_lock(&dpdk_mutex);
1425 ovs_mutex_lock(&dev->mutex);
1426 if (dev->mtu == mtu) {
1427 err = 0;
1428 goto out;
1429 }
1430
1431 mp = dpdk_mp_get(dev->socket_id, dev->mtu);
1432 if (!mp) {
1433 err = ENOMEM;
1434 goto out;
1435 }
1436
1437 rte_eth_dev_stop(dev->port_id);
1438
1439 old_mtu = dev->mtu;
1440 old_mp = dev->dpdk_mp;
1441 dev->dpdk_mp = mp;
1442 dev->mtu = mtu;
1443 dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
1444
1445 err = dpdk_eth_dev_init(dev);
1446 if (err) {
1447 dpdk_mp_put(mp);
1448 dev->mtu = old_mtu;
1449 dev->dpdk_mp = old_mp;
1450 dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
1451 dpdk_eth_dev_init(dev);
1452 goto out;
1453 }
1454
1455 dpdk_mp_put(old_mp);
1456 netdev_change_seq_changed(netdev);
1457 out:
1458 ovs_mutex_unlock(&dev->mutex);
1459 ovs_mutex_unlock(&dpdk_mutex);
1460 return err;
1461 }
1462
1463 static int
1464 netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
1465
1466 static int
1467 netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
1468 struct netdev_stats *stats)
1469 {
1470 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1471
1472 ovs_mutex_lock(&dev->mutex);
1473 memset(stats, 0, sizeof(*stats));
1474 /* Unsupported Stats */
1475 stats->collisions = UINT64_MAX;
1476 stats->rx_crc_errors = UINT64_MAX;
1477 stats->rx_fifo_errors = UINT64_MAX;
1478 stats->rx_frame_errors = UINT64_MAX;
1479 stats->rx_missed_errors = UINT64_MAX;
1480 stats->rx_over_errors = UINT64_MAX;
1481 stats->tx_aborted_errors = UINT64_MAX;
1482 stats->tx_carrier_errors = UINT64_MAX;
1483 stats->tx_errors = UINT64_MAX;
1484 stats->tx_fifo_errors = UINT64_MAX;
1485 stats->tx_heartbeat_errors = UINT64_MAX;
1486 stats->tx_window_errors = UINT64_MAX;
1487 stats->rx_dropped += UINT64_MAX;
1488
1489 rte_spinlock_lock(&dev->stats_lock);
1490 /* Supported Stats */
1491 stats->rx_packets += dev->stats.rx_packets;
1492 stats->tx_packets += dev->stats.tx_packets;
1493 stats->tx_dropped += dev->stats.tx_dropped;
1494 stats->multicast = dev->stats.multicast;
1495 stats->rx_bytes = dev->stats.rx_bytes;
1496 stats->tx_bytes = dev->stats.tx_bytes;
1497 stats->rx_errors = dev->stats.rx_errors;
1498 stats->rx_length_errors = dev->stats.rx_length_errors;
1499 rte_spinlock_unlock(&dev->stats_lock);
1500
1501 ovs_mutex_unlock(&dev->mutex);
1502
1503 return 0;
1504 }
1505
1506 static int
1507 netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
1508 {
1509 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1510 struct rte_eth_stats rte_stats;
1511 bool gg;
1512
1513 netdev_dpdk_get_carrier(netdev, &gg);
1514 ovs_mutex_lock(&dev->mutex);
1515 rte_eth_stats_get(dev->port_id, &rte_stats);
1516
1517 memset(stats, 0, sizeof(*stats));
1518
1519 stats->rx_packets = rte_stats.ipackets;
1520 stats->tx_packets = rte_stats.opackets;
1521 stats->rx_bytes = rte_stats.ibytes;
1522 stats->tx_bytes = rte_stats.obytes;
1523 /* DPDK counts imissed as errors, but count them here as dropped instead */
1524 stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
1525 stats->tx_errors = rte_stats.oerrors;
1526 stats->multicast = rte_stats.imcasts;
1527
1528 rte_spinlock_lock(&dev->stats_lock);
1529 stats->tx_dropped = dev->stats.tx_dropped;
1530 rte_spinlock_unlock(&dev->stats_lock);
1531
1532 /* These are the available DPDK counters for packets not received due to
1533 * local resource constraints in DPDK and NIC respectively. */
1534 stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
1535 stats->collisions = UINT64_MAX;
1536
1537 stats->rx_length_errors = UINT64_MAX;
1538 stats->rx_over_errors = UINT64_MAX;
1539 stats->rx_crc_errors = UINT64_MAX;
1540 stats->rx_frame_errors = UINT64_MAX;
1541 stats->rx_fifo_errors = UINT64_MAX;
1542 stats->rx_missed_errors = rte_stats.imissed;
1543
1544 stats->tx_aborted_errors = UINT64_MAX;
1545 stats->tx_carrier_errors = UINT64_MAX;
1546 stats->tx_fifo_errors = UINT64_MAX;
1547 stats->tx_heartbeat_errors = UINT64_MAX;
1548 stats->tx_window_errors = UINT64_MAX;
1549
1550 ovs_mutex_unlock(&dev->mutex);
1551
1552 return 0;
1553 }
1554
1555 static int
1556 netdev_dpdk_get_features(const struct netdev *netdev_,
1557 enum netdev_features *current,
1558 enum netdev_features *advertised OVS_UNUSED,
1559 enum netdev_features *supported OVS_UNUSED,
1560 enum netdev_features *peer OVS_UNUSED)
1561 {
1562 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1563 struct rte_eth_link link;
1564
1565 ovs_mutex_lock(&dev->mutex);
1566 link = dev->link;
1567 ovs_mutex_unlock(&dev->mutex);
1568
1569 if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
1570 if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
1571 *current = NETDEV_F_AUTONEG;
1572 }
1573 } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
1574 if (link.link_speed == ETH_LINK_SPEED_10) {
1575 *current = NETDEV_F_10MB_HD;
1576 }
1577 if (link.link_speed == ETH_LINK_SPEED_100) {
1578 *current = NETDEV_F_100MB_HD;
1579 }
1580 if (link.link_speed == ETH_LINK_SPEED_1000) {
1581 *current = NETDEV_F_1GB_HD;
1582 }
1583 } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
1584 if (link.link_speed == ETH_LINK_SPEED_10) {
1585 *current = NETDEV_F_10MB_FD;
1586 }
1587 if (link.link_speed == ETH_LINK_SPEED_100) {
1588 *current = NETDEV_F_100MB_FD;
1589 }
1590 if (link.link_speed == ETH_LINK_SPEED_1000) {
1591 *current = NETDEV_F_1GB_FD;
1592 }
1593 if (link.link_speed == ETH_LINK_SPEED_10000) {
1594 *current = NETDEV_F_10GB_FD;
1595 }
1596 }
1597
1598 return 0;
1599 }
1600
1601 static int
1602 netdev_dpdk_get_ifindex(const struct netdev *netdev)
1603 {
1604 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
1605 int ifindex;
1606
1607 ovs_mutex_lock(&dev->mutex);
1608 ifindex = dev->port_id;
1609 ovs_mutex_unlock(&dev->mutex);
1610
1611 return ifindex;
1612 }
1613
1614 static int
1615 netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
1616 {
1617 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1618
1619 ovs_mutex_lock(&dev->mutex);
1620 check_link_status(dev);
1621 *carrier = dev->link.link_status;
1622
1623 ovs_mutex_unlock(&dev->mutex);
1624
1625 return 0;
1626 }
1627
1628 static int
1629 netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
1630 {
1631 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1632 struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
1633
1634 ovs_mutex_lock(&dev->mutex);
1635
1636 if (is_vhost_running(virtio_dev)) {
1637 *carrier = 1;
1638 } else {
1639 *carrier = 0;
1640 }
1641
1642 ovs_mutex_unlock(&dev->mutex);
1643
1644 return 0;
1645 }
1646
1647 static long long int
1648 netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
1649 {
1650 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1651 long long int carrier_resets;
1652
1653 ovs_mutex_lock(&dev->mutex);
1654 carrier_resets = dev->link_reset_cnt;
1655 ovs_mutex_unlock(&dev->mutex);
1656
1657 return carrier_resets;
1658 }
1659
1660 static int
1661 netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
1662 long long int interval OVS_UNUSED)
1663 {
1664 return EOPNOTSUPP;
1665 }
1666
1667 static int
1668 netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
1669 enum netdev_flags off, enum netdev_flags on,
1670 enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
1671 {
1672 int err;
1673
1674 if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
1675 return EINVAL;
1676 }
1677
1678 *old_flagsp = dev->flags;
1679 dev->flags |= on;
1680 dev->flags &= ~off;
1681
1682 if (dev->flags == *old_flagsp) {
1683 return 0;
1684 }
1685
1686 if (dev->type == DPDK_DEV_ETH) {
1687 if (dev->flags & NETDEV_UP) {
1688 err = rte_eth_dev_start(dev->port_id);
1689 if (err)
1690 return -err;
1691 }
1692
1693 if (dev->flags & NETDEV_PROMISC) {
1694 rte_eth_promiscuous_enable(dev->port_id);
1695 }
1696
1697 if (!(dev->flags & NETDEV_UP)) {
1698 rte_eth_dev_stop(dev->port_id);
1699 }
1700 }
1701
1702 return 0;
1703 }
1704
1705 static int
1706 netdev_dpdk_update_flags(struct netdev *netdev_,
1707 enum netdev_flags off, enum netdev_flags on,
1708 enum netdev_flags *old_flagsp)
1709 {
1710 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
1711 int error;
1712
1713 ovs_mutex_lock(&netdev->mutex);
1714 error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
1715 ovs_mutex_unlock(&netdev->mutex);
1716
1717 return error;
1718 }
1719
1720 static int
1721 netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
1722 {
1723 struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
1724 struct rte_eth_dev_info dev_info;
1725
1726 if (dev->port_id < 0)
1727 return ENODEV;
1728
1729 ovs_mutex_lock(&dev->mutex);
1730 rte_eth_dev_info_get(dev->port_id, &dev_info);
1731 ovs_mutex_unlock(&dev->mutex);
1732
1735 smap_add_format(args, "port_no", "%d", dev->port_id);
1736 smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
1737 smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
1738 smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
1739 smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
1740 smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
1741 smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
1742 smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
1743 smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
1744 smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
1745 smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);
1746
1747 if (dev_info.pci_dev) {
1748         smap_add_format(args, "pci-vendor_id", "0x%x",
1749 dev_info.pci_dev->id.vendor_id);
1750 smap_add_format(args, "pci-device_id", "0x%x",
1751 dev_info.pci_dev->id.device_id);
1752 }
1753
1754 return 0;
1755 }
1756
1757 static void
1758 netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
1759 OVS_REQUIRES(dev->mutex)
1760 {
1761 enum netdev_flags old_flags;
1762
1763 if (admin_state) {
1764 netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
1765 } else {
1766 netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
1767 }
1768 }
1769
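/* unixctl handler for "netdev-dpdk/set-admin-state".  For example (assuming a
 * port named dpdk0):
 *     ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 * With no netdev argument the state is applied to every DPDK port. */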
1770 static void
1771 netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
1772 const char *argv[], void *aux OVS_UNUSED)
1773 {
1774 bool up;
1775
1776 if (!strcasecmp(argv[argc - 1], "up")) {
1777 up = true;
1778 } else if ( !strcasecmp(argv[argc - 1], "down")) {
1779 up = false;
1780 } else {
1781 unixctl_command_reply_error(conn, "Invalid Admin State");
1782 return;
1783 }
1784
1785 if (argc > 2) {
1786 struct netdev *netdev = netdev_from_name(argv[1]);
1787 if (netdev && is_dpdk_class(netdev->netdev_class)) {
1788 struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);
1789
1790 ovs_mutex_lock(&dpdk_dev->mutex);
1791 netdev_dpdk_set_admin_state__(dpdk_dev, up);
1792 ovs_mutex_unlock(&dpdk_dev->mutex);
1793
1794 netdev_close(netdev);
1795 } else {
1796 unixctl_command_reply_error(conn, "Not a DPDK Interface");
1797 netdev_close(netdev);
1798 return;
1799 }
1800 } else {
1801 struct netdev_dpdk *netdev;
1802
1803 ovs_mutex_lock(&dpdk_mutex);
1804 LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
1805 ovs_mutex_lock(&netdev->mutex);
1806 netdev_dpdk_set_admin_state__(netdev, up);
1807 ovs_mutex_unlock(&netdev->mutex);
1808 }
1809 ovs_mutex_unlock(&dpdk_mutex);
1810 }
1811 unixctl_command_reply(conn, "OK");
1812 }
1813
1814 /*
1815 * Set virtqueue flags so that we do not receive interrupts.
1816 */
1817 static void
1818 set_irq_status(struct virtio_net *dev)
1819 {
1820 uint32_t i;
1821 uint64_t idx;
1822
1823 for (i = 0; i < dev->virt_qp_nb; i++) {
1824 idx = i * VIRTIO_QNUM;
1825 rte_vhost_enable_guest_notification(dev, idx + VIRTIO_RXQ, 0);
1826 rte_vhost_enable_guest_notification(dev, idx + VIRTIO_TXQ, 0);
1827 }
1828 }
1829
1830
1831 static int
1832 netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev, struct virtio_net *dev)
1833 {
1834 uint32_t qp_num;
1835
1836 qp_num = dev->virt_qp_nb;
1837 if (qp_num > netdev->up.n_rxq) {
1838 VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
1839 "too many queues %d > %d", dev->ifname, dev->device_fh,
1840 qp_num, netdev->up.n_rxq);
1841 return -1;
1842 }
1843
1844 netdev->real_n_rxq = qp_num;
1845 netdev->real_n_txq = qp_num;
1846 if (netdev->up.n_txq > netdev->real_n_txq) {
1847 netdev->txq_needs_locking = true;
1848 } else {
1849 netdev->txq_needs_locking = false;
1850 }
1851
1852 return 0;
1853 }
1854
1855 /*
1856 * A new virtio-net device is added to a vhost port.
1857 */
1858 static int
1859 new_device(struct virtio_net *dev)
1860 {
1861 struct netdev_dpdk *netdev;
1862 bool exists = false;
1863
1864 ovs_mutex_lock(&dpdk_mutex);
1865 /* Add device to the vhost port with the same name as that passed down. */
1866 LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
1867 if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
1868 ovs_mutex_lock(&netdev->mutex);
1869 if (netdev_dpdk_vhost_set_queues(netdev, dev)) {
1870 ovs_mutex_unlock(&netdev->mutex);
1871 ovs_mutex_unlock(&dpdk_mutex);
1872 return -1;
1873 }
1874 ovsrcu_set(&netdev->virtio_dev, dev);
1875 exists = true;
1876 dev->flags |= VIRTIO_DEV_RUNNING;
1877 /* Disable notifications. */
1878 set_irq_status(dev);
1879 ovs_mutex_unlock(&netdev->mutex);
1880 break;
1881 }
1882 }
1883 ovs_mutex_unlock(&dpdk_mutex);
1884
1885 if (!exists) {
1886 VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
1887 "found", dev->ifname, dev->device_fh);
1888
1889 return -1;
1890 }
1891
1892 VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", dev->ifname,
1893 dev->device_fh);
1894 return 0;
1895 }
1896
1897 /*
1898  * Remove a virtio-net device from the specific vhost port.  Clear the
1899  * VIRTIO_DEV_RUNNING flag to stop any more packets from being sent or
1900  * received to/from a VM, and ensure all currently queued packets have been
1901  * sent/received before removing the device.
1902 */
1903 static void
1904 destroy_device(volatile struct virtio_net *dev)
1905 {
1906 struct netdev_dpdk *vhost_dev;
1907 bool exists = false;
1908
1909 ovs_mutex_lock(&dpdk_mutex);
1910 LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
1911 if (netdev_dpdk_get_virtio(vhost_dev) == dev) {
1912
1913 ovs_mutex_lock(&vhost_dev->mutex);
1914 dev->flags &= ~VIRTIO_DEV_RUNNING;
1915 ovsrcu_set(&vhost_dev->virtio_dev, NULL);
1916 exists = true;
1917 ovs_mutex_unlock(&vhost_dev->mutex);
1918 break;
1919 }
1920 }
1921
1922 ovs_mutex_unlock(&dpdk_mutex);
1923
1924 if (exists == true) {
1925 /*
1926 * Wait for other threads to quiesce after setting the 'virtio_dev'
1927 * to NULL, before returning.
1928 */
1929 ovsrcu_synchronize();
1930 /*
1931 * As call to ovsrcu_synchronize() will end the quiescent state,
1932 * put thread back into quiescent state before returning.
1933 */
1934 ovsrcu_quiesce_start();
1935 VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed", dev->ifname,
1936 dev->device_fh);
1937 } else {
1938 VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
1939 dev->device_fh);
1940 }
1941
1942 }
1943
1944 struct virtio_net *
1945 netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
1946 {
1947 return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
1948 }
1949
1950 /*
1951 * These callbacks allow virtio-net devices to be added to vhost ports when
1952  * configuration has been fully completed.
1953 */
1954 static const struct virtio_net_device_ops virtio_net_device_ops =
1955 {
1956 .new_device = new_device,
1957 .destroy_device = destroy_device,
1958 };
1959
1960 static void *
1961 start_vhost_loop(void *dummy OVS_UNUSED)
1962 {
1963 pthread_detach(pthread_self());
1964     /* Put the vhost session thread into quiescent state. */
1965 ovsrcu_quiesce_start();
1966 rte_vhost_driver_session_start();
1967 return NULL;
1968 }
1969
1970 static int
1971 dpdk_vhost_class_init(void)
1972 {
1973 rte_vhost_driver_callback_register(&virtio_net_device_ops);
1974 ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
1975 return 0;
1976 }
1977
1978 static int
1979 dpdk_vhost_cuse_class_init(void)
1980 {
1981 int err = -1;
1982
1983
1984 /* Register CUSE device to handle IOCTLs.
1985 * Unless otherwise specified on the vswitchd command line, cuse_dev_name
1986 * is set to vhost-net.
1987 */
1988 err = rte_vhost_driver_register(cuse_dev_name);
1989
1990 if (err != 0) {
1991 VLOG_ERR("CUSE device setup failure.");
1992 return -1;
1993 }
1994
1995 dpdk_vhost_class_init();
1996 return 0;
1997 }
1998
1999 static int
2000 dpdk_vhost_user_class_init(void)
2001 {
2002 dpdk_vhost_class_init();
2003 return 0;
2004 }
2005
2006 static void
2007 dpdk_common_init(void)
2008 {
2009 unixctl_command_register("netdev-dpdk/set-admin-state",
2010 "[netdev] up|down", 1, 2,
2011 netdev_dpdk_set_admin_state, NULL);
2012
2013 ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
2014 }
2015
2016 /* Client Rings */
2017
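/* Creates the single-producer tx ring and single-consumer rx ring for a
 * 'dpdkr' client port and wraps them in a ring-backed ethdev via
 * rte_eth_from_rings().  Returns 0 and sets '*eth_port_id' on success. */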
2018 static int
2019 dpdk_ring_create(const char dev_name[], unsigned int port_no,
2020 unsigned int *eth_port_id)
2021 {
2022 struct dpdk_ring *ivshmem;
2023 char ring_name[10];
2024 int err;
2025
2026 ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
2027 if (ivshmem == NULL) {
2028 return ENOMEM;
2029 }
2030
2031     /* XXX: Add support for multiqueue ring. */
2032 err = snprintf(ring_name, 10, "%s_tx", dev_name);
2033 if (err < 0) {
2034 return -err;
2035 }
2036
2037 /* Create single producer tx ring, netdev does explicit locking. */
2038 ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
2039 RING_F_SP_ENQ);
2040 if (ivshmem->cring_tx == NULL) {
2041 rte_free(ivshmem);
2042 return ENOMEM;
2043 }
2044
2045 err = snprintf(ring_name, 10, "%s_rx", dev_name);
2046 if (err < 0) {
2047 return -err;
2048 }
2049
2050 /* Create single consumer rx ring, netdev does explicit locking. */
2051 ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
2052 RING_F_SC_DEQ);
2053 if (ivshmem->cring_rx == NULL) {
2054 rte_free(ivshmem);
2055 return ENOMEM;
2056 }
2057
2058 err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
2059 &ivshmem->cring_tx, 1, SOCKET0);
2060
2061 if (err < 0) {
2062 rte_free(ivshmem);
2063 return ENODEV;
2064 }
2065
2066 ivshmem->user_port_id = port_no;
2067 ivshmem->eth_port_id = rte_eth_dev_count() - 1;
2068 list_push_back(&dpdk_ring_list, &ivshmem->list_node);
2069
2070 *eth_port_id = ivshmem->eth_port_id;
2071 return 0;
2072 }
2073
2074 static int
2075 dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
2076 {
2077 struct dpdk_ring *ivshmem;
2078 unsigned int port_no;
2079 int err = 0;
2080
2081 /* Names always start with "dpdkr" */
2082 err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
2083 if (err) {
2084 return err;
2085 }
2086
2087 /* Look through our list to find the device. */
2088 LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
2089 if (ivshmem->user_port_id == port_no) {
2090 VLOG_INFO("Found dpdk ring device %s:", dev_name);
2091 *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
2092 return 0;
2093 }
2094 }
2095 /* Need to create the device rings */
2096 return dpdk_ring_create(dev_name, port_no, eth_port_id);
2097 }
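/*
 * Illustrative sketch, not part of this file: how a separate client process
 * attaches to a 'dpdkr' port created above.  The rings are looked up by the
 * "<name>_tx"/"<name>_rx" naming convention used in dpdk_ring_create(); OVS
 * enqueues onto "<name>_tx" and dequeues from "<name>_rx", so the client
 * does the opposite.  This assumes the client has already initialized DPDK
 * as a secondary process sharing the same hugepage memory; "dpdkr0" and the
 * function name are examples only.
 */
#include <rte_ring.h>
#include <rte_mbuf.h>

static int
example_ring_client_poll(void)
{
    struct rte_ring *from_ovs = rte_ring_lookup("dpdkr0_tx");
    struct rte_mbuf *pkt;

    if (!from_ovs) {
        return -1;              /* The dpdkr port does not exist yet. */
    }
    /* Drain one packet sent by OVS; a real client would process it or
     * enqueue replies onto "dpdkr0_rx" instead of just freeing it. */
    if (rte_ring_dequeue(from_ovs, (void **) &pkt) == 0) {
        rte_pktmbuf_free(pkt);
    }
    return 0;
}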
2098
2099 static int
2100 netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
2101 struct dp_packet **pkts, int cnt, bool may_steal)
2102 {
2103 struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
2104 unsigned i;
2105
2106 /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that the
2107 * RSS hash field is clear.  This is because the same mbuf may be modified by
2108 * the consumer of the ring and returned into the datapath without
2109 * recalculating the RSS hash. */
2110 for (i = 0; i < cnt; i++) {
2111 dp_packet_rss_invalidate(pkts[i]);
2112 }
2113
2114 netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
2115 return 0;
2116 }
2117
2118 static int
2119 netdev_dpdk_ring_construct(struct netdev *netdev)
2120 {
2121 unsigned int port_no = 0;
2122 int err = 0;
2123
2124 if (rte_eal_init_ret) {
2125 return rte_eal_init_ret;
2126 }
2127
2128 ovs_mutex_lock(&dpdk_mutex);
2129
2130 err = dpdk_ring_open(netdev->name, &port_no);
2131 if (err) {
2132 goto unlock_dpdk;
2133 }
2134
2135 err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
2136
2137 unlock_dpdk:
2138 ovs_mutex_unlock(&dpdk_mutex);
2139 return err;
2140 }
2141
2142 #define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
2143 GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV) \
2144 { \
2145 NAME, \
2146 INIT, /* init */ \
2147 NULL, /* netdev_dpdk_run */ \
2148 NULL, /* netdev_dpdk_wait */ \
2149 \
2150 netdev_dpdk_alloc, \
2151 CONSTRUCT, \
2152 DESTRUCT, \
2153 netdev_dpdk_dealloc, \
2154 netdev_dpdk_get_config, \
2155 netdev_dpdk_set_config, \
2156 NULL, /* get_tunnel_config */ \
2157 NULL, /* build header */ \
2158 NULL, /* push header */ \
2159 NULL, /* pop header */ \
2160 netdev_dpdk_get_numa_id, /* get_numa_id */ \
2161 MULTIQ, /* set_multiq */ \
2162 \
2163 SEND, /* send */ \
2164 NULL, /* send_wait */ \
2165 \
2166 netdev_dpdk_set_etheraddr, \
2167 netdev_dpdk_get_etheraddr, \
2168 netdev_dpdk_get_mtu, \
2169 netdev_dpdk_set_mtu, \
2170 netdev_dpdk_get_ifindex, \
2171 GET_CARRIER, \
2172 netdev_dpdk_get_carrier_resets, \
2173 netdev_dpdk_set_miimon, \
2174 GET_STATS, \
2175 GET_FEATURES, \
2176 NULL, /* set_advertisements */ \
2177 \
2178 NULL, /* set_policing */ \
2179 NULL, /* get_qos_types */ \
2180 NULL, /* get_qos_capabilities */ \
2181 NULL, /* get_qos */ \
2182 NULL, /* set_qos */ \
2183 NULL, /* get_queue */ \
2184 NULL, /* set_queue */ \
2185 NULL, /* delete_queue */ \
2186 NULL, /* get_queue_stats */ \
2187 NULL, /* queue_dump_start */ \
2188 NULL, /* queue_dump_next */ \
2189 NULL, /* queue_dump_done */ \
2190 NULL, /* dump_queue_stats */ \
2191 \
2192 NULL, /* get_in4 */ \
2193 NULL, /* set_in4 */ \
2194 NULL, /* get_in6 */ \
2195 NULL, /* add_router */ \
2196 NULL, /* get_next_hop */ \
2197 GET_STATUS, \
2198 NULL, /* arp_lookup */ \
2199 \
2200 netdev_dpdk_update_flags, \
2201 \
2202 netdev_dpdk_rxq_alloc, \
2203 netdev_dpdk_rxq_construct, \
2204 netdev_dpdk_rxq_destruct, \
2205 netdev_dpdk_rxq_dealloc, \
2206 RXQ_RECV, \
2207 NULL, /* rx_wait */ \
2208 NULL, /* rxq_drain */ \
2209 }
2210
2211 static int
2212 process_vhost_flags(char *flag, char *default_val, int size,
2213 char **argv, char **new_val)
2214 {
2215 int changed = 0;
2216
2217 /* Depending on which version of vhost is in use, process the vhost-specific
2218 * flag if it is provided on the vswitchd command line, otherwise resort to
2219 * a default value.
2220 *
2221 * For vhost-user: Process "-vhost_sock_dir" to set the custom location of
2222 * the vhost-user socket(s).
2223 * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of the
2224 * vhost-cuse character device.
2225 */
2226 if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
2227 changed = 1;
2228 *new_val = xstrdup(argv[2]);
2229 VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
2230 } else {
2231 VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
2232 *new_val = default_val;
2233 }
2234
2235 return changed;
2236 }
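/*
 * Illustrative call, not part of this file: with argv[1] == "-vhost_sock_dir"
 * and argv[2] == "/tmp/vhost", the call below would set 'dir' to a copy of
 * "/tmp/vhost" and return 1; with any other argv[1] it returns 0 and leaves
 * 'dir' pointing at the supplied default.
 *
 *     char *dir;
 *     int changed = process_vhost_flags("-vhost_sock_dir",
 *                                       xstrdup(ovs_rundir()), NAME_MAX,
 *                                       argv, &dir);
 */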
2237
2238 int
2239 dpdk_init(int argc, char **argv)
2240 {
2241 int result;
2242 int base = 0;
2243 char *program_name = argv[0];
2244
2245 if (argc < 2 || strcmp(argv[1], "--dpdk"))
2246 return 0;
2247
2248 /* Remove the --dpdk argument from arg list.*/
2249 argc--;
2250 argv++;
2251
2252 /* Reject --user option */
2253 int i;
2254 for (i = 0; i < argc; i++) {
2255 if (!strcmp(argv[i], "--user")) {
2256 VLOG_ERR("Cannot mix --dpdk and --user options, aborting.");
2257 }
2258 }
2259
2260 #ifdef VHOST_CUSE
2261 if (process_vhost_flags("-cuse_dev_name", xstrdup("vhost-net"),
2262 PATH_MAX, argv, &cuse_dev_name)) {
2263 #else
2264 if (process_vhost_flags("-vhost_sock_dir", xstrdup(ovs_rundir()),
2265 NAME_MAX, argv, &vhost_sock_dir)) {
2266 struct stat s;
2267 int err;
2268
2269 err = stat(vhost_sock_dir, &s);
2270 if (err) {
2271 VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
2272 vhost_sock_dir);
2273 return err;
2274 }
2275 #endif
2276 /* Remove the vhost flag configuration parameters from the argument
2277 * list, so that the correct elements are passed to the DPDK
2278 * initialization function.
2279 */
2280 argc -= 2;
2281 argv += 2; /* Increment by two to bypass the vhost flag arguments */
2282 base = 2;
2283 }
2284
2285 /* Keep the program name argument, as it is needed for the call to
2286 * rte_eal_init().
2287 */
2288 argv[0] = program_name;
2289
2290 /* Initialize the DPDK EAL with the remaining arguments. */
2291 result = rte_eal_init(argc, argv);
2292 if (result < 0) {
2293 ovs_abort(result, "Cannot init EAL");
2294 }
2295
2296 rte_memzone_dump(stdout);
2297 rte_eal_init_ret = 0;
2298
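/* rte_eal_init() consumed 'result' arguments.  Put the program name just
 * past them so that, after the caller skips the count returned below, its
 * remaining argument list still begins with argv[0]. */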
2299 if (argc > result) {
2300 argv[result] = argv[0];
2301 }
2302
2303 /* We are called from the main thread here. */
2304 RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
2305
2306 return result + 1 + base;
2307 }
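/*
 * Illustrative usage, not part of this file: with a command line such as
 *
 *     ovs-vswitchd --dpdk -vhost_sock_dir /var/run/openvswitch \
 *         -c 0x1 -n 4 --socket-mem 1024 -- unix:$DB_SOCK --pidfile
 *
 * dpdk_init() strips "--dpdk" and the vhost flag pair, passes the EAL
 * options through to rte_eal_init(), and returns how many arguments the
 * caller should skip.  The EAL flags shown are examples only; the caller
 * is expected to advance roughly as follows:
 *
 *     int shift = dpdk_init(argc, argv);
 *     argc -= shift;
 *     argv += shift;
 */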
2308
2309 static const struct netdev_class dpdk_class =
2310 NETDEV_DPDK_CLASS(
2311 "dpdk",
2312 NULL,
2313 netdev_dpdk_construct,
2314 netdev_dpdk_destruct,
2315 netdev_dpdk_set_multiq,
2316 netdev_dpdk_eth_send,
2317 netdev_dpdk_get_carrier,
2318 netdev_dpdk_get_stats,
2319 netdev_dpdk_get_features,
2320 netdev_dpdk_get_status,
2321 netdev_dpdk_rxq_recv);
2322
2323 static const struct netdev_class dpdk_ring_class =
2324 NETDEV_DPDK_CLASS(
2325 "dpdkr",
2326 NULL,
2327 netdev_dpdk_ring_construct,
2328 netdev_dpdk_destruct,
2329 netdev_dpdk_set_multiq,
2330 netdev_dpdk_ring_send,
2331 netdev_dpdk_get_carrier,
2332 netdev_dpdk_get_stats,
2333 netdev_dpdk_get_features,
2334 netdev_dpdk_get_status,
2335 netdev_dpdk_rxq_recv);
2336
2337 static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
2338 NETDEV_DPDK_CLASS(
2339 "dpdkvhostcuse",
2340 dpdk_vhost_cuse_class_init,
2341 netdev_dpdk_vhost_cuse_construct,
2342 netdev_dpdk_vhost_destruct,
2343 netdev_dpdk_vhost_cuse_set_multiq,
2344 netdev_dpdk_vhost_send,
2345 netdev_dpdk_vhost_get_carrier,
2346 netdev_dpdk_vhost_get_stats,
2347 NULL,
2348 NULL,
2349 netdev_dpdk_vhost_rxq_recv);
2350
2351 static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
2352 NETDEV_DPDK_CLASS(
2353 "dpdkvhostuser",
2354 dpdk_vhost_user_class_init,
2355 netdev_dpdk_vhost_user_construct,
2356 netdev_dpdk_vhost_destruct,
2357 netdev_dpdk_vhost_set_multiq,
2358 netdev_dpdk_vhost_send,
2359 netdev_dpdk_vhost_get_carrier,
2360 netdev_dpdk_vhost_get_stats,
2361 NULL,
2362 NULL,
2363 netdev_dpdk_vhost_rxq_recv);
2364
2365 void
2366 netdev_dpdk_register(void)
2367 {
2368 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
2369
2370 if (rte_eal_init_ret) {
2371 return;
2372 }
2373
2374 if (ovsthread_once_start(&once)) {
2375 dpdk_common_init();
2376 netdev_register_provider(&dpdk_class);
2377 netdev_register_provider(&dpdk_ring_class);
2378 #ifdef VHOST_CUSE
2379 netdev_register_provider(&dpdk_vhost_cuse_class);
2380 #else
2381 netdev_register_provider(&dpdk_vhost_user_class);
2382 #endif
2383 ovsthread_once_done(&once);
2384 }
2385 }
2386
2387 int
2388 pmd_thread_setaffinity_cpu(unsigned cpu)
2389 {
2390 cpu_set_t cpuset;
2391 int err;
2392
2393 CPU_ZERO(&cpuset);
2394 CPU_SET(cpu, &cpuset);
2395 err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
2396 if (err) {
2397 VLOG_ERR("Thread affinity error %d", err);
2398 return err;
2399 }
2400 /* NON_PMD_CORE_ID is reserved for use by non-PMD threads. */
2401 ovs_assert(cpu != NON_PMD_CORE_ID);
2402 RTE_PER_LCORE(_lcore_id) = cpu;
2403
2404 return 0;
2405 }
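/*
 * Illustrative usage, not part of this file: a poll-mode-driver (PMD) thread
 * pins itself to its assigned core before entering its receive loop.
 * Storing the core id in RTE_PER_LCORE(_lcore_id) above is what makes
 * dpdk_thread_is_pmd() (below) return true for such a thread, while non-PMD
 * threads keep NON_PMD_CORE_ID.  The function name and 'core_id' are
 * hypothetical.
 */
static void
example_pmd_thread_start(unsigned core_id)
{
    if (!pmd_thread_setaffinity_cpu(core_id)) {
        /* ... poll the rx queues assigned to this core ... */
    }
}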
2406
2407 static bool
2408 dpdk_thread_is_pmd(void)
2409 {
2410 return rte_lcore_id() != NON_PMD_CORE_ID;
2411 }