/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "list.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-numa.h"
#include "ovs-rcu.h"
#include "ovs-thread.h"
#include "packets.h"
#include "unaligned.h"
#include "unixctl.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_mbuf.h"
#include "rte_virtio_net.h"

VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * We need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 */

#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)       (MTU_TO_MAX_LEN(mtu) + (512) + \
                              sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
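
/* For example, with the standard 1500 byte Ethernet MTU,
 * MTU_TO_MAX_LEN(1500) = 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN)
 * = 1518 bytes of frame data; MBUF_SIZE() then adds the 512 byte cushion,
 * the rte_mbuf header and RTE_PKTMBUF_HEADROOM on top of that. */
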
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE
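
/* With the values above the allocation attempts run 262144, 131072, 65536,
 * 32768 and finally 16384 mbufs before dpdk_mp_get() gives up. */
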
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ.  This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);

#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

/* XXX: Needs per NIC value for these constants. */
#define RX_PTHRESH 32 /* Default value of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default value of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default value of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default value of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default value of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default value of TX write-back threshold reg. */

#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */

/* Character device cuse_dev_name. */
char *cuse_dev_name = NULL;
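/* Set by dpdk_init(), either from the --cuse_dev_name command line option
 * or to the default "vhost-net". */
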
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .split_hdr_size = 0,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
        .hw_strip_crc   = 0,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4 | ETH_RSS_IPV6
                    | ETH_RSS_IPV4_UDP | ETH_RSS_IPV6_TCP | ETH_RSS_IPV6_UDP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};

static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .tx_free_thresh = 0,
    .tx_rs_thresh = 0,
    .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};

enum { MAX_RX_QUEUE_LEN = 192 };
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools.  Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time */
                                   /* pkts are queued. */
    int count;
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};

/* DPDK has no way to remove DPDK ring Ethernet devices,
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id;   /* User given port no, parsed from port name */
    int eth_port_id;    /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};

struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
    rte_spinlock_t txq_lock;
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}

/* XXX: use dpdk malloc for entire OVS. In fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}

/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}

static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}
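
/* A sketch of the resulting element layout, assuming the DPDK variant of
 * 'struct dp_packet' embeds the rte_mbuf as its first member:
 *
 *   +------------------+---------------------+------------------+
 *   | struct dp_packet | headroom (data_off) | packet data ...  |
 *   +------------------+---------------------+------------------+
 *   ^ m                ^ m->buf_addr
 */
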
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}

static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
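
/* For example, the first pool for the default 1500 byte MTU on socket 0 is
 * named "ovs_mp_1500_0_262144"; halved retries only change the trailing
 * mbuf count. */
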
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(&dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
                                 &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return -diag;
    }

    for (i = 0; i < dev->up.n_txq; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return -diag;
        }
    }

    for (i = 0; i < dev->up.n_rxq; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      dev->socket_id,
                                      &rx_conf, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return -diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}

static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    int i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    /* Each index is considered as a cpu core id, since there should
     * be one tx queue for each cpu core. */
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        /* Flag 'flush_tx' for queues whose core is on the same numa
         * node as 'netdev', so that they are flushed on every send. */
        netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
    }
}

static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
    rte_spinlock_init(&netdev->txq_lock);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}

static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, 0, 0); /* string must be null terminated */
    return 0;
}
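
/* For example, parsing "dpdk0" with prefix "dpdk" stores 0 in '*port_no',
 * and "dpdkr5" with prefix "dpdkr" stores 5. */
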
static int
netdev_dpdk_vhost_construct(struct netdev *netdev_)
{
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk". */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}

static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Can not remove port, vhost device still attached");
        return;
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}

/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, we do not try to restore its old
 * configuration and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    netdev_dpdk_alloc_txq(netdev, n_txq);
    err = dpdk_eth_dev_init(netdev);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}

static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* Free the buffers that we couldn't transmit, one at a time (each
         * packet could come from a different mempool). */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += txq->count - nb_tx;
        ovs_mutex_unlock(&dev->mutex);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}

static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}

/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = 1;    /* The guest's TX queue. */
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_RX_BATCH);
    if (!nb_rx) {
        return EAGAIN;
    }

    vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
    *c = (int) nb_rx;

    return 0;
}

static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues. */
    if (rxq_->queue_id == rte_lcore_id()) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             MIN((int)NETDEV_MAX_RX_BATCH,
                                 (int)MAX_RX_QUEUE_LEN));
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}

static void
__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
                         int cnt, bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int tx_pkts, i;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        ovs_mutex_lock(&vhost_dev->mutex);
        vhost_dev->stats.tx_dropped += cnt;
        ovs_mutex_unlock(&vhost_dev->mutex);
        goto out;
    }

    /* vHost has a single TX queue, so we need to lock it for TX. */
    rte_spinlock_lock(&vhost_dev->txq_lock);
    tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
                                      (struct rte_mbuf **)pkts, cnt);

    vhost_dev->stats.tx_packets += tx_pkts;
    vhost_dev->stats.tx_dropped += (cnt - tx_pkts);
    rte_spinlock_unlock(&vhost_dev->txq_lock);

out:
    if (may_steal) {
        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}

inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
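
/* Packets queued above leave the NIC queue in one of three ways: the burst
 * fills up to MAX_TX_QUEUE_LEN, the queue has 'flush_tx' set, or more than
 * DRAIN_TSC timer cycles have passed since the last flush. */
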
/* Tx function.  Copies the packets into DPDK mbufs before transmitting. */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[cnt];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache. */

    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int)size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now. */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += dropped;
        ovs_mutex_unlock(&dev->mutex);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs,
                                 newcnt, true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}

static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
    }
    return 0;
}

static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);
            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int)size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped += dropped;
            ovs_mutex_unlock(&dev->mutex);
        }
    }
}

static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        /* Restore the original configuration if the reinit failed. */
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);

static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));

    /* Unsupported Stats */
    stats->rx_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->multicast = UINT64_MAX;
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_length_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_bytes = UINT64_MAX;
    stats->rx_dropped = UINT64_MAX;
    stats->tx_bytes = UINT64_MAX;

    /* Supported Stats */
    stats->rx_packets = dev->stats.rx_packets;
    stats->tx_packets = dev->stats.tx_packets;
    stats->tx_dropped = dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);   /* Refresh link status. */
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    stats->tx_dropped = dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}

static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}

static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}

static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
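
/* Example invocation, assuming a port named "dpdk0" exists:
 *
 *     ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 *
 * Omitting the port name applies the state to every DPDK port. */
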
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
    dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
}

/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->up.name, IFNAMSIZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            ovsrcu_set(&netdev->virtio_dev, dev);
            ovs_mutex_unlock(&netdev->mutex);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
                  dev->ifname, dev->device_fh);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' (%ld) has been added",
              dev->ifname, dev->device_fh);

    return 0;
}

/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce after clearing the
             * 'virtio_dev' pointer, so that no reader still holds a
             * reference to the device being removed.
             */
            ovsrcu_synchronize();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' (%ld) has been removed",
              dev->ifname, dev->device_fh);
}

struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
};
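
/* The vhost library invokes these callbacks from the session loop started
 * in start_cuse_session_loop(), not from an OVS thread, which is why
 * new_device() and destroy_device() take dpdk_mutex themselves. */
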
static void *
start_cuse_session_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the cuse thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}

static int
dpdk_vhost_class_init(void)
{
    pthread_t thread;
    int err = -1;

    rte_vhost_driver_callback_register(&virtio_net_device_ops);

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net. */
    err = rte_vhost_driver_register(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    /* start_cuse_session_loop blocks OVS RCU quiescent state, so directly use
     * pthread API. */
    return pthread_create(&thread, NULL, start_cuse_session_loop, NULL);
}

static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}

static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue rings. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
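
/* For a device named "dpdkr0" this creates the rings "dpdkr0_tx" and
 * "dpdkr0_rx"; both names fit the 10 byte 'ring_name' buffer exactly. */
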
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr". */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings. */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}

static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* DPDK rings have a single TX queue, so sends need locking. */
    rte_spinlock_lock(&dev->txq_lock);
    netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
    rte_spinlock_unlock(&dev->txq_lock);
    return 0;
}

static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)          \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    NULL,                       /* netdev_dpdk_set_config */  \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    NULL,                       /* get_qos_types */           \
    NULL,                       /* get_qos_capabilities */    \
    NULL,                       /* get_qos */                 \
    NULL,                       /* set_qos */                 \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}

int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from the arg list. */
    argc--;
    argv++;

    /* If the cuse_dev_name parameter has been provided, set 'cuse_dev_name' to
     * this string if it meets the correct criteria.  Otherwise, set it to the
     * default (vhost-net). */
    if (!strcmp(argv[1], "--cuse_dev_name") &&
        (strlen(argv[2]) <= NAME_MAX)) {

        cuse_dev_name = strdup(argv[2]);

        /* Remove the cuse_dev_name configuration parameters from the
         * argument list, so that the correct elements are passed to the
         * DPDK initialization function. */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the cuse_dev_name arguments */
        base = 2;

        VLOG_INFO("User-provided cuse_dev_name in use: /dev/%s", cuse_dev_name);
    } else {
        cuse_dev_name = "vhost-net";
        VLOG_INFO("No cuse_dev_name provided - defaulting to /dev/vhost-net");
    }

    /* Keep the program name argument as this is needed for call to
     * rte_eal_init(). */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here. */
    thread_set_nonpmd();

    return result + 1 + base;
}
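
/* An illustrative vswitchd invocation (a sketch only; EAL arguments vary
 * by setup):
 *
 *     ovs-vswitchd --dpdk -c 0x1 -n 4 --socket-mem 1024 -- \
 *         unix:/var/run/openvswitch/db.sock --pidfile
 *
 * Everything between "--dpdk" and "--" is handed to rte_eal_init(); the
 * return value above tells the caller how many arguments were consumed. */
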
const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        NULL,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhost",
        dpdk_vhost_class_init,
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);

void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
        netdev_register_provider(&dpdk_vhost_class);
        ovsthread_once_done(&once);
    }
}

int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}

void
thread_set_nonpmd(void)
{
    /* We have to use NON_PMD_CORE_ID to allow non-pmd threads to perform
     * certain DPDK operations, like rte_eth_dev_configure(). */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}