/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "dp-packet.h"
#include "dpif-netdev.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"
#include "rte_config.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define SOCKET0 0
#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/* We need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB. */

#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)       (MTU_TO_MAX_LEN(mtu) + (512) + \
                              sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
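/* As a worked example (using the standard ETHER_HDR_LEN of 14 and
 * ETHER_CRC_LEN of 4), MTU_TO_MAX_LEN(1500) = 1500 + 14 + 4 = 1518 bytes of
 * frame data, and MBUF_SIZE(1500) adds the 512-byte pad, the rte_mbuf header
 * and RTE_PKTMBUF_HEADROOM on top of that. */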
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */

#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);
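/* With the values above, the halving sequence tried by dpdk_mp_get() is
 * 262144, 131072, 65536, 32768 and finally 16384 mbufs. */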
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

/* XXX: Needs per NIC value for these constants. */
#define RX_PTHRESH 32 /* Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 32 /* Default values of RX host threshold reg. */
#define RX_WTHRESH 16 /* Default values of RX write-back threshold reg. */

#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /* Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /* Default values of TX write-back threshold reg. */

#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
/* Character device cuse_dev_name. */
char *cuse_dev_name = NULL;
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode        = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
static const struct rte_eth_rxconf rx_conf = {
    .rx_thresh = {
        .pthresh = RX_PTHRESH,
        .hthresh = RX_HTHRESH,
        .wthresh = RX_WTHRESH,
    },
};
static const struct rte_eth_txconf tx_conf = {
    .tx_thresh = {
        .pthresh = TX_PTHRESH,
        .hthresh = TX_HTHRESH,
        .wthresh = TX_WTHRESH,
    },
    .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
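/* DRAIN_TSC is in TSC cycles: on a hypothetical 2 GHz TSC, 200000 cycles
 * corresponds to roughly 100 microseconds between forced queue drains. */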
enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST,
};

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);
/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them. */
struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
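/* A non pmd caller is therefore expected to bracket those calls, e.g.:
 *
 *     ovs_mutex_lock(&nonpmd_mempool_mutex);
 *     dpdk_queue_pkts(dev, qid, mbufs, cnt);
 *     dpdk_queue_flush(dev, qid);
 *     ovs_mutex_unlock(&nonpmd_mempool_mutex);
 *
 * See dpdk_do_tx_copy() below for the pattern actually used in this file. */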
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time */
                                   /* pkts are queued. */
    int count;
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
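/* Packets accumulate in 'burst_pkts' until the queue fills up to
 * MAX_TX_QUEUE_LEN, 'flush_tx' is set, or DRAIN_TSC cycles have elapsed since
 * the last flush; see dpdk_queue_pkts() and dpdk_queue_flush__() below. */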
/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;  /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
    rte_spinlock_t txq_lock;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* XXX: use dpdk malloc for entire OVS. In fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        ovs_abort(0, "%s", __func__);
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}
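/* The resulting layout of each mempool element is the struct dp_packet
 * metadata (whose first member is the struct rte_mbuf itself, which is why
 * the casts in free_dpdk_buf() and below are valid) followed by the
 * 'buf_len'-byte data buffer, inside which 'data_off' reserves up to
 * RTE_PKTMBUF_HEADROOM bytes of headroom before the packet data. */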
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
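/* The mempool name encodes the MTU, socket id and size, so for example the
 * first allocation attempt for a 1500-byte MTU on socket 0 is named
 * "ovs_mp_1500_0_262144". */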
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(&dmp->list_node);
        /* destroy mp-pool. */
    }
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->up.n_txq,
                                 &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d", diag);
        return -diag;
    }

    for (i = 0; i < dev->up.n_txq; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, &tx_conf);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return -diag;
        }
    }

    for (i = 0; i < dev->up.n_rxq; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      dev->socket_id,
                                      &rx_conf, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return -diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}
static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
    return &netdev->up;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    unsigned i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    /* Each index is considered as a cpu core id, since there should
     * be one tx queue for each cpu core. */
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        /* If the corresponding core is not on the same numa node
         * as 'netdev', flag 'flush_tx'. */
        netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
    }
}
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);
    rte_spinlock_init(&netdev->txq_lock);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, 0, 0); /* string must be null terminated */
    return 0;
}
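/* For example, with prefix "dpdk" the name "dpdk7" parses to *port_no == 7,
 * while a name that does not start with the prefix is rejected. */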
static int
netdev_dpdk_vhost_construct(struct netdev *netdev_)
{
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Cannot remove port, vhost device still attached");
        return;
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev_->n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}
/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try restoring its old configuration
 * and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    netdev_dpdk_alloc_txq(netdev, n_txq);
    err = dpdk_eth_dev_init(netdev);

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}
static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}
static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}
static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += txq->count - nb_tx;
        ovs_mutex_unlock(&dev->mutex);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}
static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}
static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = VIRTIO_TXQ; /* The guest's TX ring is our receive side. */
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) packets,
                                    NETDEV_MAX_RX_BATCH);
    if (!nb_rx) {
        return EAGAIN;
    }

    vhost_dev->stats.rx_packets += (uint64_t) nb_rx;
    *c = (int) nb_rx;

    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core. Do not flush other
     * queues. */
    if (rxq_->queue_id == rte_lcore_id()) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             MIN((int) NETDEV_MAX_RX_BATCH,
                                 (int) MAX_PKT_BURST));
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, struct dp_packet **pkts,
                         int cnt, bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int tx_pkts, i;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        ovs_mutex_lock(&vhost_dev->mutex);
        vhost_dev->stats.tx_dropped += cnt;
        ovs_mutex_unlock(&vhost_dev->mutex);
        goto out;
    }

    /* There is a single vHost TX queue, so we need to lock it for TX. */
    rte_spinlock_lock(&vhost_dev->txq_lock);
    tx_pkts = rte_vhost_enqueue_burst(virtio_dev, VIRTIO_RXQ,
                                      (struct rte_mbuf **) pkts, cnt);

    vhost_dev->stats.tx_packets += tx_pkts;
    vhost_dev->stats.tx_dropped += (cnt - tx_pkts);
    rte_spinlock_unlock(&vhost_dev->txq_lock);

out:
    if (may_steal) {
        for (i = 0; i < cnt; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;
    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
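/* Note that dpdk_queue_flush__() resets txq->count to zero, so every flush in
 * the loop above frees queue slots and the copy always makes progress, even
 * when 'cnt' exceeds the currently free slots. */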
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[cnt];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache. */
    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);
            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        ovs_mutex_lock(&dev->mutex);
        dev->stats.tx_dropped += dropped;
        ovs_mutex_unlock(&dev->mutex);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs, newcnt,
                                 true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int) size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            ovs_mutex_lock(&dev->mutex);
            dev->stats.tx_dropped += dropped;
            ovs_mutex_unlock(&dev->mutex);
        }
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, dev->mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        /* Restore the old configuration if the new MTU cannot be applied. */
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->rx_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->multicast = UINT64_MAX;
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_length_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_bytes += UINT64_MAX;
    stats->rx_dropped += UINT64_MAX;
    stats->tx_bytes += UINT64_MAX;

    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    stats->tx_dropped = dev->stats.tx_dropped;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
    dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->up.name, IFNAMSIZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            ovsrcu_set(&netdev->virtio_dev, dev);
            ovs_mutex_unlock(&netdev->mutex);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
                  dev->ifname, dev->device_fh);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' (%ld) has been added",
              dev->ifname, dev->device_fh);

    return 0;
}
/*
 * Remove a virtio-net device from the specific vhost port. Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce before
             * setting the virtio_dev to NULL.
             */
            ovsrcu_synchronize();
            /*
             * As call to ovsrcu_synchronize() will end the quiescent state,
             * put thread back into quiescent state before returning.
             */
            ovsrcu_quiesce_start();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' (%ld) has been removed",
              dev->ifname, dev->device_fh);
}
struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
};
static void *
start_cuse_session_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the cuse thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();

    return NULL;
}
static int
dpdk_vhost_class_init(void)
{
    int err;

    rte_vhost_driver_callback_register(&virtio_net_device_ops);

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    ovs_thread_create("cuse_thread", start_cuse_session_loop, NULL);
    return 0;
}
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}
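/* The registered command can then be driven via ovs-appctl, for example
 * (assuming a configured DPDK port named "dpdk0"):
 *
 *     ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 */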
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid OVS_UNUSED,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear. This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_set_rss_hash(pkts[i], 0);
    }

    /* DPDK rings have a single TX queue, so they need locking. */
    rte_spinlock_lock(&dev->txq_lock);
    netdev_dpdk_send__(dev, 0, pkts, cnt, may_steal);
    rte_spinlock_unlock(&dev->txq_lock);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND,  \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)           \
{                                                                         \
    NAME,                                                                 \
    INIT,                       /* init */                                \
    NULL,                       /* netdev_dpdk_run */                     \
    NULL,                       /* netdev_dpdk_wait */                    \
                                                                          \
    netdev_dpdk_alloc,                                                    \
    CONSTRUCT,                                                            \
    DESTRUCT,                                                             \
    netdev_dpdk_dealloc,                                                  \
    netdev_dpdk_get_config,                                               \
    NULL,                       /* netdev_dpdk_set_config */              \
    NULL,                       /* get_tunnel_config */                   \
    NULL,                       /* build header */                        \
    NULL,                       /* push header */                         \
    NULL,                       /* pop header */                          \
    netdev_dpdk_get_numa_id,    /* get_numa_id */                         \
    MULTIQ,                     /* set_multiq */                          \
                                                                          \
    SEND,                       /* send */                                \
    NULL,                       /* send_wait */                           \
                                                                          \
    netdev_dpdk_set_etheraddr,                                            \
    netdev_dpdk_get_etheraddr,                                            \
    netdev_dpdk_get_mtu,                                                  \
    netdev_dpdk_set_mtu,                                                  \
    netdev_dpdk_get_ifindex,                                              \
    GET_CARRIER,                                                          \
    netdev_dpdk_get_carrier_resets,                                       \
    netdev_dpdk_set_miimon,                                               \
    GET_STATS,                                                            \
    GET_FEATURES,                                                         \
    NULL,                       /* set_advertisements */                  \
                                                                          \
    NULL,                       /* set_policing */                        \
    NULL,                       /* get_qos_types */                       \
    NULL,                       /* get_qos_capabilities */                \
    NULL,                       /* get_qos */                             \
    NULL,                       /* set_qos */                             \
    NULL,                       /* get_queue */                           \
    NULL,                       /* set_queue */                           \
    NULL,                       /* delete_queue */                        \
    NULL,                       /* get_queue_stats */                     \
    NULL,                       /* queue_dump_start */                    \
    NULL,                       /* queue_dump_next */                     \
    NULL,                       /* queue_dump_done */                     \
    NULL,                       /* dump_queue_stats */                    \
                                                                          \
    NULL,                       /* get_in4 */                             \
    NULL,                       /* set_in4 */                             \
    NULL,                       /* get_in6 */                             \
    NULL,                       /* add_router */                          \
    NULL,                       /* get_next_hop */                        \
    GET_STATUS,                                                           \
    NULL,                       /* arp_lookup */                          \
                                                                          \
    netdev_dpdk_update_flags,                                             \
                                                                          \
    netdev_dpdk_rxq_alloc,                                                \
    netdev_dpdk_rxq_construct,                                            \
    netdev_dpdk_rxq_destruct,                                             \
    netdev_dpdk_rxq_dealloc,                                              \
    RXQ_RECV,                                                             \
    NULL,                       /* rx_wait */                             \
    NULL,                       /* rxq_drain */                           \
}
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from arg list. */
    argc--;
    argv++;

    /* If the cuse_dev_name parameter has been provided, set 'cuse_dev_name' to
     * this string if it meets the correct criteria. Otherwise, set it to the
     * default (vhost-net).
     */
    if (!strcmp(argv[1], "--cuse_dev_name") &&
        (strlen(argv[2]) <= NAME_MAX)) {

        cuse_dev_name = strdup(argv[2]);

        /* Remove the cuse_dev_name configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function. */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the cuse_dev_name arguments */
        base = 2;

        VLOG_ERR("User-provided cuse_dev_name in use: /dev/%s", cuse_dev_name);
    } else {
        cuse_dev_name = "vhost-net";
        VLOG_INFO("No cuse_dev_name provided - defaulting to /dev/vhost-net");
    }

    /* Keep the program name argument as this is needed for call to
     * rte_eal_init(). */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    thread_set_nonpmd();

    return result + 1 + base;
}
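/* An illustrative invocation (the EAL arguments here are placeholders only):
 *
 *     ovs-vswitchd --dpdk --cuse_dev_name my-vhost-net -c 0x1 -n 4 -- ...
 *
 * dpdk_init() consumes "--dpdk", the optional cuse_dev_name pair and the EAL
 * arguments, and returns the index of the first argument left for OVS. */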
const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        NULL,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);

const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhost",
        dpdk_vhost_class_init,
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
        netdev_register_provider(&dpdk_vhost_class);
        ovsthread_once_done(&once);
    }
}
int
pmd_thread_setaffinity_cpu(int cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}
void
thread_set_nonpmd(void)
{
    /* We have to use NON_PMD_CORE_ID to allow non-pmd threads to perform
     * certain DPDK operations, like rte_eth_dev_configure(). */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;
}

static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}