/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"
#include "rte_config.h"
#include "rte_virtio_net.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * We need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE_MTU(mtu)   (MTU_TO_MAX_LEN(mtu)        \
                              + sizeof(struct dp_packet) \
                              + RTE_PKTMBUF_HEADROOM)
#define MBUF_SIZE_DRIVER     (2048                       \
                              + sizeof (struct rte_mbuf) \
                              + RTE_PKTMBUF_HEADROOM)
#define MBUF_SIZE(mtu)       MAX(MBUF_SIZE_MTU(mtu), MBUF_SIZE_DRIVER)

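/* Worked example of the sizing above (illustrative sketch added for
 * clarity; not part of the driver logic).  For the standard Ethernet MTU
 * of 1500, MTU_TO_MAX_LEN(1500) is 1500 + 14 (ETHER_HDR_LEN) + 4
 * (ETHER_CRC_LEN) = 1518 bytes of frame data, and MBUF_SIZE(1500) adds the
 * dp_packet metadata and RTE_PKTMBUF_HEADROOM on top, unless the
 * MBUF_SIZE_DRIVER lower bound is larger.  The helper below is hypothetical
 * and compiled out; it only demonstrates the arithmetic. */
#if 0
static void
mbuf_size_example(void)
{
    size_t frame_len = MTU_TO_MAX_LEN(1500);   /* 1518 bytes on the wire. */
    size_t mbuf_len = MBUF_SIZE(1500);         /* >= MBUF_SIZE_DRIVER. */

    VLOG_DBG("frame %u, mbuf %u",
             (unsigned) frame_len, (unsigned) mbuf_len);
}
#endif
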
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ.  This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);

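/* Worked example of the two build-time checks above (added for clarity;
 * compiled out).  MAX_NB_MBUF / MIN_NB_MBUF = 262144 / 16384 = 16, and
 * ROUND_DOWN_POW2(16) is 16, so every value in the halving sequence tried
 * by dpdk_mp_get() below is an integer, and the smallest one must still be
 * a multiple of MP_CACHE_SZ. */
#if 0
static void
nb_mbuf_sequence_example(void)
{
    unsigned n;

    /* Prints 262144, 131072, 65536, 32768, 16384: every candidate mempool
     * size that dpdk_mp_get() may try. */
    for (n = MAX_NB_MBUF; n >= MIN_NB_MBUF; n /= 2) {
        VLOG_DBG("candidate mempool size: %u", n);
    }
}
#endif
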
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */

/*
 * Maximum amount of time in microseconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100

static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
        .hw_strip_crc   = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time */
                                   /* pkts are queued. */
    int count;
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

/* dpdk has no way to remove dpdk ring ethernet devices
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id;    /* User given port no, parsed from port name */
    int eth_port_id;     /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST,
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects 'stats'. */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has.  We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission */
    int real_n_txq;
    int real_n_rxq;
    bool txq_needs_locking;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}

/* XXX: use dpdk malloc for entire OVS.  In fact huge pages should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}

/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}

static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}

static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    } else {
        VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);
    }

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}

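/* Usage sketch (added for illustration; compiled out, and the values of
 * 'example_socket' and 'example_mtu' are hypothetical).  Mempools are shared
 * per (socket_id, mtu) pair: a second dpdk_mp_get() with the same arguments
 * only bumps the refcount, and each user must balance its get with a
 * dpdk_mp_put(), defined just below. */
#if 0
static void
dpdk_mp_refcount_example(void) OVS_REQUIRES(dpdk_mutex)
{
    int example_socket = SOCKET0;
    int example_mtu = ETHER_MTU;

    struct dpdk_mp *a = dpdk_mp_get(example_socket, example_mtu);
    struct dpdk_mp *b = dpdk_mp_get(example_socket, example_mtu);

    ovs_assert(a == b);      /* Same pool, refcount is now 2. */
    dpdk_mp_put(b);          /* Drops to 1; pool stays alive for 'a'. */
    dpdk_mp_put(a);
}
#endif
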
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV):  rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request fewer queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
        if (diag) {
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->real_n_txq = n_txq;

        return 0;
    }

    return diag;
}

static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}

static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    unsigned i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!netdev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core.  'flush_tx' is set when the
             * corresponding core shares the numa node with 'netdev'. */
            netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs. Always flush */
            netdev->tx_q[i].flush_tx = true;
        }
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }
}

static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_spinlock_init(&netdev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    netdev_->requested_n_rxq = NR_QUEUE;
    netdev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}

static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, NULL, 0); /* string must be null terminated */
    return 0;
}

static int
vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
{
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
}

static int
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
    err = vhost_construct_helper(netdev_);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket.
     */
    snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
             vhost_sock_dir, netdev_->name);
    err = rte_vhost_driver_register(netdev->vhost_id);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 netdev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(netdev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  netdev->vhost_id, netdev_->name);
        err = vhost_construct_helper(netdev_);
    }

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}

static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Can not remove port, vhost device still attached");
        return;
    }

    if (rte_vhost_driver_unregister(dev->vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
    } else {
        fatal_signal_remove_file_to_unlink(dev->vhost_id);
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
                                               netdev->requested_n_rxq), 1);
    netdev_change_seq_changed(netdev);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}

/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, the old queue counts are restored and the
 * error is returned. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;
    int old_rxq, old_txq;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    old_txq = netdev->up.n_txq;
    old_rxq = netdev->up.n_rxq;
    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    err = dpdk_eth_dev_init(netdev);
    netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
    if (err) {
        /* If there has been an error, it means that the requested queues
         * have not been created.  Restore the old numbers. */
        netdev->up.n_txq = old_txq;
        netdev->up.n_rxq = old_rxq;
    }

    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                                  unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->real_n_txq = 1;
    netdev->up.n_rxq = 1;
    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_free(netdev->tx_q);
    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;
    netdev_dpdk_alloc_txq(netdev, netdev->up.n_txq);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}

static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}

static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}

static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count)
{
    int i;
    struct dp_packet *packet;

    stats->rx_packets += count;
    for (i = 0; i < count; i++) {
        packet = packets[i];

        if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_length_errors++;
            continue;
        }

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += dp_packet_size(packet);
    }
}

/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = rxq_->queue_id;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    if (rxq_->queue_id >= vhost_dev->real_n_rxq) {
        return EOPNOTSUPP;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    rte_spinlock_lock(&vhost_dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&vhost_dev->stats, packets, nb_rx);
    rte_spinlock_unlock(&vhost_dev->stats_lock);

    *c = (int) nb_rx;
    return 0;
}

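/* Illustration of the virtqueue index mapping used above and in
 * __netdev_dpdk_vhost_send() (added for clarity; compiled out).  vhost
 * virtqueues come in RX/TX pairs, so OVS queue 'qid' dequeues from the
 * guest's TX ring (qid * VIRTIO_QNUM + VIRTIO_TXQ) and enqueues to the
 * guest's RX ring (qid * VIRTIO_QNUM + VIRTIO_RXQ).  With VIRTIO_QNUM == 2,
 * VIRTIO_RXQ == 0 and VIRTIO_TXQ == 1, queue 0 uses virtqueues 1 and 0,
 * queue 1 uses virtqueues 3 and 2, and so on. */
#if 0
static void
vhost_qid_mapping_example(void)
{
    int qid;

    for (qid = 0; qid < 2; qid++) {
        VLOG_DBG("ovs q%d: dequeue vq %d, enqueue vq %d", qid,
                 qid * VIRTIO_QNUM + VIRTIO_TXQ,
                 qid * VIRTIO_QNUM + VIRTIO_RXQ);
    }
}
#endif
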
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues.
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed */
    if (rxq_->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}

static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}

static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt,
                         bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    uint64_t start = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        rte_spinlock_lock(&vhost_dev->stats_lock);
        vhost_dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&vhost_dev->stats_lock);
        goto out;
    }

    if (vhost_dev->txq_needs_locking) {
        qid = qid % vhost_dev->real_n_txq;
        rte_spinlock_lock(&vhost_dev->tx_q[qid].tx_lock);
    }

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible next iteration.*/
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
            unsigned int expired = 0;

            if (!start) {
                start = rte_get_timer_cycles();
            }

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
                    expired = 1;
                    break;
                }
            }
            if (expired) {
                /* break out of main loop. */
                break;
            }
        }
    } while (cnt);

    if (vhost_dev->txq_needs_locking) {
        rte_spinlock_unlock(&vhost_dev->tx_q[qid].tx_lock);
    }

    rte_spinlock_lock(&vhost_dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&vhost_dev->stats, pkts, total_pkts,
                                         cnt);
    rte_spinlock_unlock(&vhost_dev->stats_lock);

out:
    if (may_steal) {
        int i;

        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}

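/* Sketch of the retry budget used above (added for clarity; compiled out).
 * The deadline is expressed in TSC cycles, so VHOST_ENQ_RETRY_USECS is
 * converted with the timer frequency; for a hypothetical 2.4 GHz timer,
 * 100 us * 2.4e9 Hz / 1e6 = 240000 cycles. */
#if 0
static uint64_t
vhost_retry_budget_example(void)
{
    /* Same expression as in __netdev_dpdk_vhost_send(). */
    return VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
}
#endif
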
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}

/* Tx function.  Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
                                 newcnt, true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}

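/* Usage sketch (added for illustration; compiled out, and the calling
 * function is hypothetical).  Any non pmd thread that reaches
 * dpdk_do_tx_copy(), dpdk_queue_pkts() or dpdk_queue_flush() must serialize
 * its mempool accesses, because all non pmd threads share one mempool
 * cache.  dpdk_do_tx_copy() takes the mutex itself, as shown above; code
 * calling the queueing helpers directly would follow the same pattern. */
#if 0
static void
nonpmd_tx_example(struct netdev_dpdk *dev, int qid,
                  struct rte_mbuf **pkts, int cnt)
{
    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    dpdk_queue_pkts(dev, qid, pkts, cnt);
    dpdk_queue_flush(dev, qid);

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
#endif
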
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
                       int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
    }
    return 0;
}

static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int) size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}

static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, dev->mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_dropped += UINT64_MAX;

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;
    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
    stats->collisions = UINT64_MAX;

    stats->rx_length_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_missed_errors = rte_stats.imissed;

    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

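/* Worked example of the counter mapping above (added for clarity).  If the
 * NIC reports ierrors = 10 of which imissed = 4, and rx_nombuf = 2, then
 * rx_errors = 10 - 4 = 6 genuinely bad packets, while
 * rx_dropped = 2 + 4 = 6 accounts for packets lost to missing mbufs or
 * missed by the NIC rather than malformed ones. */
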
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}

static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}

static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < dev->virt_qp_nb; i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(dev, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(dev, idx + VIRTIO_TXQ, 0);
    }
}

static int
netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev, struct virtio_net *dev)
{
    uint32_t qp_num;

    qp_num = dev->virt_qp_nb;
    if (qp_num > netdev->up.n_rxq) {
        VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
                 "too many queues %d > %d", dev->ifname, dev->device_fh,
                 qp_num, netdev->up.n_rxq);
        return -1;
    }

    netdev->real_n_rxq = qp_num;
    netdev->real_n_txq = qp_num;
    if (netdev->up.n_txq > netdev->real_n_txq) {
        netdev->txq_needs_locking = true;
    } else {
        netdev->txq_needs_locking = false;
    }

    return 0;
}

/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            if (netdev_dpdk_vhost_set_queues(netdev, dev)) {
                ovs_mutex_unlock(&netdev->mutex);
                ovs_mutex_unlock(&dpdk_mutex);
                return -1;
            }
            ovsrcu_set(&netdev->virtio_dev, dev);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            ovs_mutex_unlock(&netdev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
                  "found", dev->ifname, dev->device_fh);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", dev->ifname,
              dev->device_fh);
    return 0;
}

/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            exists = true;
            ovs_mutex_unlock(&vhost_dev->mutex);
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists == true) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed", dev->ifname,
                  dev->device_fh);
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
                  dev->device_fh);
    }
}

struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}

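/* Reader-side sketch (added for illustration; compiled out).  'virtio_dev'
 * is an RCU pointer: datapath threads read it with ovsrcu_get() and simply
 * see NULL once destroy_device() has cleared it, while destroy_device()
 * uses ovsrcu_synchronize() above to wait until no reader can still hold
 * the old pointer before the device goes away. */
#if 0
static bool
vhost_reader_example(const struct netdev_dpdk *dev)
{
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    /* Safe to dereference until this thread next quiesces. */
    return is_vhost_running(virtio_dev);
}
#endif
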
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
};

static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the cuse thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}

static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
    return 0;
}

static int
dpdk_vhost_cuse_class_init(void)
{
    int err;

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    dpdk_vhost_class_init();
    return 0;
}

static int
dpdk_vhost_user_class_init(void)
{
    dpdk_vhost_class_init();
    return 0;
}

static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}

/* Client Rings */

static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}

static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}

static int
netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_rss_invalidate(pkts[i]);
    }

    netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
    return 0;
}

static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)          \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    netdev_dpdk_set_config,                                   \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    NULL,                       /* get_qos_types */           \
    NULL,                       /* get_qos_capabilities */    \
    NULL,                       /* get_qos */                 \
    NULL,                       /* set_qos */                 \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}

static int
process_vhost_flags(char *flag, char *default_val, int size,
                    char **argv, char **new_val)
{
    int changed = 0;

    /* Depending on which version of vhost is in use, process the
     * vhost-specific flag if it is provided on the vswitchd command line,
     * otherwise resort to a default value.
     *
     * For vhost-user: Process "-vhost_sock_dir" to set the custom location
     * of the vhost-user socket(s).
     * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of
     * the vhost-cuse character device.
     */
    if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
        changed = 1;
        *new_val = xstrdup(argv[2]);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}

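/* Example command lines (added for illustration; the paths are
 * hypothetical) matching the flags handled above:
 *
 *     ovs-vswitchd --dpdk -vhost_sock_dir /var/run/openvswitch -c 0x1 ...
 *     ovs-vswitchd --dpdk -cuse_dev_name my-vhost-net -c 0x1 ...
 *
 * When the flag is absent, vhost_sock_dir defaults to ovs_rundir() and
 * cuse_dev_name to "vhost-net", as dpdk_init() below shows. */
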
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk"))
        return 0;

    /* Remove the --dpdk argument from arg list.*/
    argc--;
    argv++;

    /* Reject --user option */
    int i;
    for (i = 0; i < argc; i++) {
        if (!strcmp(argv[i], "--user")) {
            VLOG_ERR("Can not mix --dpdk and --user options, aborting.");
        }
    }

#ifdef VHOST_CUSE
    if (process_vhost_flags("-cuse_dev_name", xstrdup("vhost-net"),
                            PATH_MAX, argv, &cuse_dev_name)) {
#else
    if (process_vhost_flags("-vhost_sock_dir", xstrdup(ovs_rundir()),
                            NAME_MAX, argv, &vhost_sock_dir)) {
        struct stat s;
        int err;

        err = stat(vhost_sock_dir, &s);
        if (err) {
            VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
                     vhost_sock_dir);
            return err;
        }
#endif
        /* Remove the vhost flag configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function
         */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the vhost flag arguments */
        base = 2;
    }

    /* Keep the program name argument as this is needed for call to
     * rte_eal_init()
     */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    return result + 1 + base;
}

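/* Worked example of the return value above (added for clarity).  With a
 * hypothetical command line "ovs-vswitchd --dpdk -vhost_sock_dir /tmp
 * -c 0x1 ...", one argv slot is consumed for "--dpdk", two for the vhost
 * flag and its value (base = 2), and rte_eal_init() reports how many EAL
 * arguments it consumed, so the caller can skip 'result + 1 + base'
 * entries of the original argv to find its own remaining options. */
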
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_cuse_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);

static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);

void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
        netdev_register_provider(&dpdk_vhost_cuse_class);
#else
        netdev_register_provider(&dpdk_vhost_user_class);
#endif
        ovsthread_once_done(&once);
    }
}

int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

static bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}