/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dp-packet.h"
#include "dpif-netdev.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/* We need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB. */

#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)       (MTU_TO_MAX_LEN(mtu) + (512) + \
                              sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
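/* Illustrative sizing (editorial example, not used by the code): with the
 * standard ETHER_MTU of 1500, MTU_TO_MAX_LEN(1500) is 1500 + 14 (Ethernet
 * header) + 4 (CRC) = 1518 bytes, and MBUF_SIZE() adds the 512-byte pad,
 * the rte_mbuf metadata and RTE_PKTMBUF_HEADROOM on top of that. */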
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF (4096 * 64)
#define MIN_NB_MBUF (4096 * 4)
#define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ.  This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
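/* Worked example (editorial, illustrative only): MAX_NB_MBUF / MIN_NB_MBUF
 * is 16, already a power of two, so the halving loop in dpdk_mp_get() tries
 * 262144, 131072, 65536, 32768 and finally 16384 mbufs; the two asserts
 * above guarantee that every candidate, down to the smallest, remains a
 * multiple of MP_CACHE_SZ. */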
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

/* XXX: Needs per NIC value for these constants. */
#define RX_PTHRESH 32 /* Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default values of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default values of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default values of TX write-back threshold reg. */

#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
/* Character device cuse_dev_name. */
char *cuse_dev_name = NULL;

/* Maximum amount of time in microseconds to try to enqueue to vhost. */
#define VHOST_ENQ_RETRY_USECS 100
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};
static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
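/* Rough intuition (editorial; the real value is hardware dependent):
 * DRAIN_TSC is measured in TSC cycles, so on a hypothetical 2 GHz TSC the
 * 200000-cycle drain threshold corresponds to roughly 100 microseconds
 * before a partially filled tx queue is flushed anyway. */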
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Contains all 'struct netdev_dpdk's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush the queue every
                                    * time pkts are queued. */
    int count;
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;  /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
    rte_spinlock_t txq_lock;
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* XXX: use dpdk malloc for entire OVS.  In fact huge pages should be used
 * for all other segments too: data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
                                 &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return -diag;
    }

    for (i = 0; i < dev->up.n_txq; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return -diag;
        }
    }

    for (i = 0; i < dev->up.n_rxq; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      dev->socket_id,
                                      &rx_conf, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return -diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    unsigned int i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    /* Each index is considered as a cpu core id, since there should
     * be one tx queue for each cpu core. */
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        /* If the corresponding core is not on the same numa node
         * as 'netdev', flag 'flush_tx'. */
        netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
    }
}
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
    rte_spinlock_init(&netdev->txq_lock);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, 0, 0); /* string must be null terminated */
    return 0;
}
static int
netdev_dpdk_vhost_construct(struct netdev *netdev_)
{
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Cannot remove port, vhost device still attached");
        return;
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}
/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try restoring its old configuration
 * and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    netdev_dpdk_alloc_txq(netdev, n_txq);
    err = dpdk_eth_dev_init(netdev);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += txq->count - nb_tx;
        ovs_mutex_unlock(&dev->mutex);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}
static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}
static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = VIRTIO_TXQ; /* The guest's TX queue is our receive path. */
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_RX_BATCH);
    if (!nb_rx) {
        return EAGAIN;
    }

    vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
    *c = (int) nb_rx;

    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues. */
    if (rxq_->queue_id == rte_lcore_id()) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             MIN((int) NETDEV_MAX_RX_BATCH,
                                 (int) MAX_PKT_BURST));
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
                         int cnt, bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    uint64_t start = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        ovs_mutex_lock(&vhost_dev->mutex);
        vhost_dev->stats.tx_dropped += cnt;
        ovs_mutex_unlock(&vhost_dev->mutex);
        goto out;
    }

    /* vHost has a single TX queue, so we need to lock it for TX. */
    rte_spinlock_lock(&vhost_dev->txq_lock);

    do {
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible next iteration.*/
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
            unsigned int expired = 0;

            if (!start) {
                start = rte_get_timer_cycles();
            }

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, VIRTIO_RXQ)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
                    expired = 1;
                    break;
                }
            }
            if (expired) {
                /* break out of main loop. */
                break;
            }
        }
    } while (cnt);

    vhost_dev->stats.tx_packets += (total_pkts - cnt);
    vhost_dev->stats.tx_dropped += cnt;
    rte_spinlock_unlock(&vhost_dev->txq_lock);

out:
    if (may_steal) {
        int i;

        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
/* Tx function.  Copies the packets into DPDK mbufs and transmits them. */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[cnt];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */

    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += dropped;
        ovs_mutex_unlock(&dev->mutex);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs,
                                 newcnt, true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped += dropped;
            ovs_mutex_unlock(&dev->mutex);
        }
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        /* On failure, restore the old configuration. */
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->rx_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->multicast = UINT64_MAX;
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_length_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_bytes = UINT64_MAX;
    stats->rx_dropped = UINT64_MAX;
    stats->tx_bytes = UINT64_MAX;

    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    stats->tx_dropped = dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
    dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->up.name, IFNAMSIZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            ovsrcu_set(&netdev->virtio_dev, dev);
            ovs_mutex_unlock(&netdev->mutex);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
                  dev->ifname, dev->device_fh);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' (%ld) has been added",
              dev->ifname, dev->device_fh);
    return 0;
}
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce before
             * setting the virtio_dev to NULL.
             */
            ovsrcu_synchronize();
            /*
             * As call to ovsrcu_synchronize() will end the quiescent state,
             * put thread back into quiescent state before returning.
             */
            ovsrcu_quiesce_start();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' (%ld) has been removed",
              dev->ifname, dev->device_fh);
}
struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
};
static void *
start_cuse_session_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the cuse thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();

    return NULL;
}
1674 rte_vhost_driver_callback_register(&virtio_net_device_ops
);
1676 /* Register CUSE device to handle IOCTLs.
1677 * Unless otherwise specified on the vswitchd command line, cuse_dev_name
1678 * is set to vhost-net.
1680 err
= rte_vhost_driver_register(cuse_dev_name
);
1683 VLOG_ERR("CUSE device setup failure.");
1687 ovs_thread_create("cuse_thread", start_cuse_session_loop
, NULL
);
static int
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

    return 0;
}
/* Client Rings */

static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_set_rss_hash(pkts[i], 0);
    }

    /* DPDK rings have a single TX queue, so sends require locking. */
    rte_spinlock_lock(&dev->txq_lock);
    netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
    rte_spinlock_unlock(&dev->txq_lock);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)        \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    NULL,                       /* netdev_dpdk_set_config */  \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    NULL,                       /* get_qos_types */           \
    NULL,                       /* get_qos_capabilities */    \
    NULL,                       /* get_qos */                 \
    NULL,                       /* set_qos */                 \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from arg list.*/
    argc--;
    argv++;

    /* If the cuse_dev_name parameter has been provided, set 'cuse_dev_name' to
     * this string if it meets the correct criteria.  Otherwise, set it to the
     * default (vhost-net).
     */
    if (!strcmp(argv[1], "--cuse_dev_name") &&
        (strlen(argv[2]) <= NAME_MAX)) {

        cuse_dev_name = strdup(argv[2]);

        /* Remove the cuse_dev_name configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function.
         */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the cuse_dev_name arguments */
        base = 2;

        VLOG_ERR("User-provided cuse_dev_name in use: /dev/%s", cuse_dev_name);
    } else {
        cuse_dev_name = "vhost-net";
        VLOG_INFO("No cuse_dev_name provided - defaulting to /dev/vhost-net");
    }

    /* Keep the program name argument as this is needed for call to
     * rte_eal_init().
     */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    thread_set_nonpmd();

    return result + 1 + base;
}
const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhost",
        dpdk_vhost_class_init,
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
        netdev_register_provider(&dpdk_vhost_class);
        ovsthread_once_done(&once);
    }
}
int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}
void
thread_set_nonpmd(void)
{
    /* We have to use NON_PMD_CORE_ID to allow non-pmd threads to perform
     * certain DPDK operations, like rte_eth_dev_configure(). */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}