/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 */

#define MTU_TO_MAX_LEN(mtu)  ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)       (MTU_TO_MAX_LEN(mtu) + (512) + \
                              sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
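/* Worked example (illustrative arithmetic only, assuming the standard
 * 1500-byte ETHER_MTU and a typical build with a 128-byte struct rte_mbuf
 * and the default 128-byte RTE_PKTMBUF_HEADROOM; exact sizes depend on the
 * DPDK build):
 *
 *   MTU_TO_MAX_LEN(1500) = 1500 + 14 (Ethernet header) + 4 (CRC) = 1518
 *   MBUF_SIZE(1500)      = 1518 + 512 + 128 + 128 = 2286 bytes per mbuf
 */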
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */
#define MAX_NB_MBUF (4096 * 64)
#define MIN_NB_MBUF (4096 * 4)
#define MP_CACHE_SZ RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF) == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF/MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
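/* With the values above the asserts work out as follows (illustrative
 * arithmetic only): MAX_NB_MBUF = 262144 and MIN_NB_MBUF = 16384, so
 * MAX_NB_MBUF/MIN_NB_MBUF = 16, which is already a power of 2.  262144 is
 * divisible by 16, and the smallest pool we will try, 262144/16 = 16384,
 * is a multiple of MP_CACHE_SZ (RTE_MEMPOOL_CACHE_MAX_SIZE, 512 in common
 * DPDK releases). */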
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096)*/
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096)*/

static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */

/*
 * Maximum amount of time in micro seconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};
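/* DRAIN_TSC is a budget in TSC cycles, not a wall-clock time.  For
 * illustration only: on a CPU whose timer runs at 2 GHz, 200000 cycles
 * corresponds to roughly 100 microseconds between forced queue drains;
 * the actual interval scales with rte_get_timer_hz(). */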
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time */
                                   /* pkts are queued. */
    int count;
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;  /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    uint8_t hwaddr[ETH_ADDR_LEN];
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has.  We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission */
    int real_n_txq;
    bool txq_needs_locking;

    /* Spinlock for vhost transmission.  Other DPDK devices use spinlocks in
     * dpdk_tx_queue. */
    rte_spinlock_t vhost_tx_lock;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};

static bool thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* XXX: use dpdk malloc for entire OVS. In fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free_seg(pkt);
}
static void
__rte_pktmbuf_init(struct rte_mempool *mp,
                   void *opaque_arg OVS_UNUSED,
                   void *_m,
                   unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;
    uint32_t buf_len = mp->elt_size - sizeof(struct dp_packet);

    RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct dp_packet));

    memset(m, 0, mp->elt_size);

    /* start of buffer is just after mbuf structure */
    m->buf_addr = (char *)m + sizeof(struct dp_packet);
    m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
                      sizeof(struct dp_packet);
    m->buf_len = (uint16_t)buf_len;

    /* keep some headroom between start of buffer and data */
    m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);

    /* init some constant fields */
    m->pool = mp;
    m->nb_segs = 1;
    m->port = 0xff;
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    __rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, NULL,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
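/* Illustrative example of the naming scheme above (not values read from a
 * running system): a pool for MTU 1500 on NUMA socket 0 that succeeds at the
 * first attempt is called "ovs_mp_1500_0_262144"; if hugepages are scarce and
 * the allocation only succeeds after two halvings, the name becomes
 * "ovs_mp_1500_0_65536". */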
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int i;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    dev->up.n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    dev->real_n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = rte_eth_dev_configure(dev->port_id, dev->up.n_rxq, dev->real_n_txq,
                                 &port_conf);
    if (diag) {
        VLOG_ERR("eth dev config error %d. rxq:%d txq:%d", diag,
                 dev->up.n_rxq, dev->real_n_txq);
        return -diag;
    }

    for (i = 0; i < dev->real_n_txq; i++) {
        diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                      dev->socket_id, NULL);
        if (diag) {
            VLOG_ERR("eth dev tx queue setup error %d", diag);
            return -diag;
        }
    }

    for (i = 0; i < dev->up.n_rxq; i++) {
        diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                      dev->socket_id,
                                      NULL, dev->dpdk_mp->mp);
        if (diag) {
            VLOG_ERR("eth dev rx queue setup error %d", diag);
            return -diag;
        }
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("eth dev start error %d", diag);
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);

    return &netdev->up;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    unsigned int i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!netdev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core.  If the corresponding core
             * is not on the same numa node as 'netdev', set 'flush_tx'. */
            netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs. Always flush */
            netdev->tx_q[i].flush_tx = true;
        }
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_spinlock_init(&netdev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_MAX_LEN(netdev->mtu);

    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, netdev->mtu);
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    netdev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);
    *port_no = strtol(cport, NULL, 0); /* string must be null terminated */
    return 0;
}
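/* For illustration: dpdk_dev_parse_name("dpdk7", "dpdk", &port_no) sets
 * port_no to 7 and returns 0, while dpdk_dev_parse_name("eth0", "dpdk",
 * &port_no) fails the prefix check.  Note that strtol() with base 0 also
 * accepts hex ("dpdk0x1") and octal ("dpdk010") spellings. */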
static int
vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    rte_spinlock_init(&netdev->vhost_tx_lock);
    return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
}
static int
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
    err = vhost_construct_helper(netdev_);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location where
     * the socket is to be created, then register the socket.
     */
    snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
             vhost_sock_dir, netdev_->name);
    err = rte_vhost_driver_register(netdev->vhost_id);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 netdev->vhost_id);
    } else {
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  netdev->vhost_id, netdev_->name);
        err = vhost_construct_helper(netdev_);
    }
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
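/* Illustrative path construction (example values only): with vhost_sock_dir
 * left at the default OVS run directory, e.g.
 * "/usr/local/var/run/openvswitch", adding a port named "vhost-user-0"
 * registers the socket "/usr/local/var/run/openvswitch/vhost-user-0". */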
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Can't remove a port while a guest is attached to it. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Can not remove port, vhost device still attached");
        return;
    }

    ovs_mutex_lock(&dpdk_mutex);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}

static int
netdev_dpdk_get_config(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "configured_rx_queues", "%d", netdev_->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev_->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}
/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try to restore its old configuration
 * and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;
    int old_rxq, old_txq;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    old_txq = netdev->up.n_txq;
    old_rxq = netdev->up.n_rxq;
    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    err = dpdk_eth_dev_init(netdev);
    netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
    if (err) {
        /* If there has been an error, it means that the requested queues
         * have not been created.  Restore the old numbers. */
        netdev->up.n_txq = old_txq;
        netdev->up.n_rxq = old_rxq;
    }

    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->real_n_txq = 1;
    netdev->up.n_rxq = 1;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free_seg(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}
static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}
static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = 1;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    rte_spinlock_lock(&vhost_dev->stats_lock);
    vhost_dev->stats.rx_packets += (uint64_t)nb_rx;
    rte_spinlock_unlock(&vhost_dev->stats_lock);

    *c = (int) nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues.
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed */
    if (rxq_->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}
*netdev
, struct dp_packet
**pkts
,
962 int cnt
, bool may_steal
)
964 struct netdev_dpdk
*vhost_dev
= netdev_dpdk_cast(netdev
);
965 struct virtio_net
*virtio_dev
= netdev_dpdk_get_virtio(vhost_dev
);
966 struct rte_mbuf
**cur_pkts
= (struct rte_mbuf
**) pkts
;
967 unsigned int total_pkts
= cnt
;
970 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev
))) {
971 rte_spinlock_lock(&vhost_dev
->stats_lock
);
972 vhost_dev
->stats
.tx_dropped
+= cnt
;
973 rte_spinlock_unlock(&vhost_dev
->stats_lock
);
977 /* There is vHost TX single queue, So we need to lock it for TX. */
978 rte_spinlock_lock(&vhost_dev
->vhost_tx_lock
);
981 unsigned int tx_pkts
;
983 tx_pkts
= rte_vhost_enqueue_burst(virtio_dev
, VIRTIO_RXQ
,
985 if (OVS_LIKELY(tx_pkts
)) {
986 /* Packets have been sent.*/
988 /* Prepare for possible next iteration.*/
989 cur_pkts
= &cur_pkts
[tx_pkts
];
991 uint64_t timeout
= VHOST_ENQ_RETRY_USECS
* rte_get_timer_hz() / 1E6
;
992 unsigned int expired
= 0;
995 start
= rte_get_timer_cycles();
999 * Unable to enqueue packets to vhost interface.
1000 * Check available entries before retrying.
1002 while (!rte_vring_available_entries(virtio_dev
, VIRTIO_RXQ
)) {
1003 if (OVS_UNLIKELY((rte_get_timer_cycles() - start
) > timeout
)) {
1009 /* break out of main loop. */
1014 rte_spinlock_unlock(&vhost_dev
->vhost_tx_lock
);
1016 rte_spinlock_lock(&vhost_dev
->stats_lock
);
1017 vhost_dev
->stats
.tx_packets
+= (total_pkts
- cnt
);
1018 vhost_dev
->stats
.tx_dropped
+= cnt
;
1019 rte_spinlock_unlock(&vhost_dev
->stats_lock
);
1025 for (i
= 0; i
< total_pkts
; i
++) {
1026 dp_packet_delete(pkts
[i
]);
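/* Illustrative timing for the retry loop above (example numbers, not
 * measured): with VHOST_ENQ_RETRY_USECS = 100 and rte_get_timer_hz()
 * returning 2.4 GHz, 'timeout' evaluates to 100 * 2.4e9 / 1e6 = 240000
 * cycles, so a stalled guest queue is polled for at most ~100 us before the
 * remaining packets are counted as tx_dropped. */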
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */
    if (!thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int)size, dev->max_packet_len);
            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, (struct dp_packet **) mbufs,
                                 newcnt, true);
    } else {
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (!thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid OVS_UNUSED,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    i - next_tx_idx);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int)size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt - next_tx_idx);
        }

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
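/* Illustrative remap (example numbers only): if the datapath created four tx
 * queues (up.n_txq == 4) but the NIC only supports two (real_n_txq == 2),
 * 'txq_needs_locking' is true and qids 0..3 map to 0, 1, 0, 1, so two pmd
 * threads may share a NIC queue and must take its tx_lock. */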
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev,
                          const uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        memcpy(dev->hwaddr, mac, ETH_ADDR_LEN);
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev,
                          uint8_t mac[ETH_ADDR_LEN])
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memcpy(mac, dev->hwaddr, ETH_ADDR_LEN);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    mp = dpdk_mp_get(dev->socket_id, dev->mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        /* Restore the previous mempool and MTU, then restart the device. */
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_MAX_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->rx_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->multicast = UINT64_MAX;
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_length_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_bytes = UINT64_MAX;
    stats->rx_dropped = UINT64_MAX;
    stats->tx_bytes = UINT64_MAX;

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}

static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev_info.max_rx_pktlen);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    smap_add_format(args, "pci-vendor_id", "0x%x", dev_info.pci_dev->id.vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", dev_info.pci_dev->id.device_id);

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    dev->virtqueue[VIRTIO_RXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
    dev->virtqueue[VIRTIO_TXQ]->used->flags = VRING_USED_F_NO_NOTIFY;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            ovsrcu_set(&netdev->virtio_dev, dev);
            ovs_mutex_unlock(&netdev->mutex);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' (%ld) can't be added - name not found",
                  dev->ifname, dev->device_fh);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' (%ld) has been added",
              dev->ifname, dev->device_fh);
    return 0;
}
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            ovs_mutex_unlock(&vhost_dev->mutex);

            /*
             * Wait for other threads to quiesce before
             * setting the virtio_dev to NULL.
             */
            ovsrcu_synchronize();
            /*
             * As call to ovsrcu_synchronize() will end the quiescent state,
             * put thread back into quiescent state before returning.
             */
            ovsrcu_quiesce_start();
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    VLOG_INFO("vHost Device '%s' (%ld) has been removed",
              dev->ifname, dev->device_fh);
}
struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
};
static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the cuse thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}

static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
    return 0;
}
static int
dpdk_vhost_cuse_class_init(void)
{
    int err;

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);
    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    dpdk_vhost_class_init();
    return 0;
}

static int
dpdk_vhost_user_class_init(void)
{
    dpdk_vhost_class_init();
    return 0;
}
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}
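/* The command registered above can be exercised from the shell, e.g.
 * (illustrative invocation; the port name is an example):
 *
 *   ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 *
 * Omitting the netdev argument applies the state to every DPDK port. */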
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[10];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, 10, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, 10, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer/producer rings, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
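/* Illustrative naming (example device only): opening "dpdkr0" creates the
 * rings "dpdkr0_tx" and "dpdkr0_rx".  Note that 'ring_name' is only 10 bytes,
 * so longer device names are silently truncated by snprintf(), which the
 * err < 0 checks above do not catch. */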
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
static int
netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_set_rss_hash(pkts[i], 0);
    }

    netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)          \
{                                                             \
    NAME,                                                     \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
\
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    NULL,                       /* netdev_dpdk_set_config */  \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
\
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
\
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
\
    NULL,                       /* set_policing */            \
    NULL,                       /* get_qos_types */           \
    NULL,                       /* get_qos_capabilities */    \
    NULL,                       /* get_qos */                 \
    NULL,                       /* set_qos */                 \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
\
    NULL,                       /* get_in4 */                 \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_in6 */                 \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
\
    netdev_dpdk_update_flags,                                 \
\
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
static bool
process_vhost_flags(char *flag, char *default_val, int size,
                    char **argv, char **new_val)
{
    bool changed = false;

    /* Depending on which version of vhost is in use, process the vhost-specific
     * flag if it is provided on the vswitchd command line, otherwise resort to
     * a default value.
     *
     * For vhost-user: Process "-vhost_sock_dir" to set the custom location of
     * the vhost-user socket(s).
     * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of the
     * vhost-cuse character device.
     */
    if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
        changed = true;
        *new_val = strdup(argv[2]);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from arg list.*/
    argc--;
    argv++;

#ifdef VHOST_CUSE
    if (process_vhost_flags("-cuse_dev_name", strdup("vhost-net"),
                            PATH_MAX, argv, &cuse_dev_name)) {
#else
    if (process_vhost_flags("-vhost_sock_dir", strdup(ovs_rundir()),
                            NAME_MAX, argv, &vhost_sock_dir)) {
        struct stat s;
        int err;

        err = stat(vhost_sock_dir, &s);
        if (err) {
            VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
                     vhost_sock_dir);
            return err;
        }
#endif
        /* Remove the vhost flag configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function
         */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the vhost flag arguments */
        base = 2;
    }

    /* Keep the program name argument as this is needed for call to
     * rte_eal_init()
     */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    return result + 1 + base;
}
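/* Illustrative vswitchd invocation consuming these arguments (paths and EAL
 * flags are examples only):
 *
 *   ovs-vswitchd --dpdk -vhost_sock_dir /var/run/openvswitch \
 *                -c 0x1 -n 4 --socket-mem 1024 -- unix:$DB_SOCK
 *
 * Everything between "--dpdk" and "--" is handed to rte_eal_init(). */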
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
        netdev_register_provider(&dpdk_vhost_cuse_class);
#else
        netdev_register_provider(&dpdk_vhost_user_class);
#endif
        ovsthread_once_done(&once);
    }
}
int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}
static bool
thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}