/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "openvswitch/list.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"

#include "rte_config.h"
#include "rte_meter.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) - ETHER_HDR_LEN - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              (MTU_TO_MAX_FRAME_LEN(mtu)    \
                                     + sizeof(struct dp_packet)   \
                                     + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN      1024
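
/* Worked example (illustrative only; exact values depend on the DPDK build):
 * for the standard 1500-byte Ethernet MTU, MTU_TO_MAX_FRAME_LEN(1500) is
 * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) + 8 (two VLAN tags) = 1526
 * bytes, so MBUF_SIZE(1500) is 1526 plus sizeof(struct dp_packet) plus
 * RTE_PKTMBUF_HEADROOM. */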
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
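
/* Concretely, MAX_NB_MBUF is 4096 * 64 = 262144 mbufs and MIN_NB_MBUF is
 * 4096 * 4 = 16384, so on a machine short of hugepages the mempool
 * allocation is attempted with 262144, 131072, 65536, 32768 and finally
 * 16384 mbufs before giving up (see dpdk_mp_get() below). */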
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

#define OVS_VHOST_MAX_QUEUE_NUM 1024     /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */
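
/* A tx queue's 'map' therefore goes through three states: it starts out as
 * OVS_VHOST_QUEUE_MAP_UNKNOWN when allocated, and vring_state_changed()
 * later points it at its own queue id when the guest enables the queue, or
 * at OVS_VHOST_QUEUE_DISABLED when the guest disables it (see
 * netdev_dpdk_remap_txqs() below). */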
static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */

/*
 * Maximum amount of time in micro seconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
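
/* For example (illustrative arithmetic only): __netdev_dpdk_vhost_send()
 * converts this budget to timer cycles as
 * VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6, so on a host with a
 * 2 GHz timer the retry window is roughly 200000 cycles. */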
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
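
/* DRAIN_TSC is expressed directly in timer cycles: assuming a 2 GHz TSC
 * (an assumption for illustration; the real rate is hardware dependent),
 * 200000ULL cycles is roughly 100 us between forced drains of a partially
 * filled tx queue. */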
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Quality of Service */

/* An instance of a QoS configuration. Always associated with a particular
 * network device.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs.
 */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted.
 */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct the QoS implementation on 'netdev'. The
     * implementation should make the appropriate calls to configure QoS
     * according to 'details'. The implementation may assume that any current
     * QoS configuration already installed should be destroyed before
     * constructing the new configuration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets 'netdev->qos_conf'
     * to an initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_construct)(struct netdev *netdev, const struct smap *details);
    /* Destroys the data structures allocated by the implementation as part of
     * 'qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);
    /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     */
    int (*qos_get)(const struct netdev *netdev, struct smap *details);
    /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
     * required calls to complete the reconfiguration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function may be null if 'qos_conf' is not configurable.
     */
    int (*qos_set)(struct netdev *netdev, const struct smap *details);
    /* Modify an array of rte_mbufs. The modification is specific to
     * each qos implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts, int cnt);
};
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
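
/* A non pmd caller is therefore expected to follow a pattern along these
 * lines (a sketch; see dpdk_do_tx_copy() below for the real usage):
 *
 *     ovs_mutex_lock(&nonpmd_mempool_mutex);
 *     dpdk_queue_pkts(dev, qid, mbufs, cnt);
 *     dpdk_queue_flush(dev, qid);
 *     ovs_mutex_unlock(&nonpmd_mempool_mutex);
 */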
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time */
                                   /* pkts are queued. */
    int count;
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    int map;                       /* Mapping of configured vhost-user queue
                                    * to the queue enabled by the guest. */
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* dpdk has no way to remove dpdk ring ethernet devices
   so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;           /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has. We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission */
    int real_n_txq;
    int real_n_rxq;
    bool txq_needs_locking;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other */
    char vhost_id[PATH_MAX];

    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    struct qos_conf *qos_conf;
    rte_spinlock_t qos_lock;
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                     NETDEV_DPDK_MBUF_ALIGN);
}
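
/* For example, with mtu 1500 and DPDK's usual default RTE_PKTMBUF_HEADROOM
 * of 128 bytes (the headroom is a build-time DPDK setting, so this is only
 * illustrative): 1526 + 128 = 1654, which ROUND_UP() takes to 2048, the
 * next multiple of NETDEV_DPDK_MBUF_ALIGN. */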
/* XXX: use dpdk malloc for entire OVS. in fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;
    struct rte_pktmbuf_pool_private mbp_priv;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof (struct dp_packet) - sizeof (struct rte_mbuf);

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request less queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
        if (diag) {
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with less tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with less rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->real_n_txq = n_txq;

        return 0;
    }

    return diag;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
    return &netdev->up;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    unsigned i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!netdev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core. If the corresponding core
             * is not on the same numa node as 'netdev', flags the
             * 'flush_tx'. */
            netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs. Always flush */
            netdev->tx_q[i].flush_tx = true;
        }

        /* Initialize map for vhost devices. */
        netdev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;
    uint32_t buf_size;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_spinlock_init(&netdev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->flags = 0;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_FRAME_LEN(netdev->mtu);

    buf_size = dpdk_buf_size(netdev->mtu);
    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    /* Initialise QoS configuration to NULL and qos lock to unlocked */
    netdev->qos_conf = NULL;
    rte_spinlock_init(&netdev->qos_lock);

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    netdev_->requested_n_rxq = NR_QUEUE;
    netdev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    } else {
        netdev_dpdk_alloc_txq(netdev, OVS_VHOST_MAX_QUEUE_NUM);
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
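
/* For example, given prefix "dpdk", the name "dpdk7" parses to port_no 7,
 * while "dpdk-7", "dpdk+7" and "xyz7" all fail with ENODEV. */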
static int
vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
{
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
}
static int
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
    err = vhost_construct_helper(netdev_);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    const char *name = netdev_->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location where
     * the socket is to be created, then register the socket.
     */
    snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
             vhost_sock_dir, name);

    err = rte_vhost_driver_register(netdev->vhost_id);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 netdev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(netdev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  netdev->vhost_id, name);
        err = vhost_construct_helper(netdev_);
    }

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev_->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on socket"
                 " '%s' must be restarted.",
                 dev->vhost_id);
    }

    if (rte_vhost_driver_unregister(dev->vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
    } else {
        fatal_signal_remove_file_to_unlink(dev->vhost_id);
    }

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
                                               netdev->requested_n_rxq), 1);
    netdev_change_seq_changed(netdev);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}
/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try restoring its old configuration
 * and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;
    int old_rxq, old_txq;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    old_txq = netdev->up.n_txq;
    old_rxq = netdev->up.n_rxq;
    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    err = dpdk_eth_dev_init(netdev);
    netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
    if (err) {
        /* If there has been an error, it means that the requested queues
         * have not been created. Restore the old numbers. */
        netdev->up.n_txq = old_txq;
        netdev->up.n_rxq = old_rxq;
    }

    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                                  unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->real_n_txq = 1;
    netdev->up.n_rxq = 1;
    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}
static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}
static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count)
{
    int i;
    struct dp_packet *packet;

    stats->rx_packets += count;
    for (i = 0; i < count; i++) {
        packet = packets[i];

        if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_length_errors++;
            continue;
        }

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += dp_packet_size(packet);
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = rxq_->queue_id;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    if (rxq_->queue_id >= vhost_dev->real_n_rxq) {
        return EOPNOTSUPP;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    rte_spinlock_lock(&vhost_dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&vhost_dev->stats, packets, nb_rx);
    rte_spinlock_unlock(&vhost_dev->stats_lock);

    *c = (int) nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues.
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed */
    if (rxq_->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}
*dev
, struct rte_mbuf
**pkts
,
1227 struct netdev
*netdev
= &dev
->up
;
1229 if (dev
->qos_conf
!= NULL
) {
1230 rte_spinlock_lock(&dev
->qos_lock
);
1231 if (dev
->qos_conf
!= NULL
) {
1232 cnt
= dev
->qos_conf
->ops
->qos_run(netdev
, pkts
, cnt
);
1234 rte_spinlock_unlock(&dev
->qos_lock
);
1241 netdev_dpdk_vhost_update_tx_counters(struct netdev_stats
*stats
,
1242 struct dp_packet
**packets
,
1247 int sent
= attempted
- dropped
;
1249 stats
->tx_packets
+= sent
;
1250 stats
->tx_dropped
+= dropped
;
1252 for (i
= 0; i
< sent
; i
++) {
1253 stats
->tx_bytes
+= dp_packet_size(packets
[i
]);
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt,
                         bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int qos_pkts = cnt;
    uint64_t start = 0;

    qid = vhost_dev->tx_q[qid % vhost_dev->real_n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid < 0)) {
        rte_spinlock_lock(&vhost_dev->stats_lock);
        vhost_dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&vhost_dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&vhost_dev->tx_q[qid].tx_lock);

    /* Check if QoS has been configured for the netdev */
    cnt = netdev_dpdk_qos_run__(vhost_dev, cur_pkts, cnt);
    qos_pkts -= cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible next iteration.*/
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
            unsigned int expired = 0;

            if (!start) {
                start = rte_get_timer_cycles();
            }

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
                    expired = 1;
                    break;
                }
            }
            if (expired) {
                /* break out of main loop. */
                break;
            }
        }
    } while (cnt);

    rte_spinlock_unlock(&vhost_dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&vhost_dev->stats_lock);
    cnt += qos_pkts;
    netdev_dpdk_vhost_update_tx_counters(&vhost_dev->stats, pkts, total_pkts,
                                         cnt);
    rte_spinlock_unlock(&vhost_dev->stats_lock);

out:
    if (may_steal) {
        int i;

        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int)size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *), dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs, newcnt, true);
    } else {
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);

        dropped += qos_pkts - newcnt;
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
*netdev
, int qid
, struct dp_packet
**pkts
,
1445 int cnt
, bool may_steal
)
1447 if (OVS_UNLIKELY(pkts
[0]->source
!= DPBUF_DPDK
)) {
1450 dpdk_do_tx_copy(netdev
, qid
, pkts
, cnt
);
1452 for (i
= 0; i
< cnt
; i
++) {
1453 dp_packet_delete(pkts
[i
]);
1457 __netdev_dpdk_vhost_send(netdev
, qid
, pkts
, cnt
, may_steal
);
1463 netdev_dpdk_send__(struct netdev_dpdk
*dev
, int qid
,
1464 struct dp_packet
**pkts
, int cnt
, bool may_steal
)
1468 if (OVS_UNLIKELY(dev
->txq_needs_locking
)) {
1469 qid
= qid
% dev
->real_n_txq
;
1470 rte_spinlock_lock(&dev
->tx_q
[qid
].tx_lock
);
1473 if (OVS_UNLIKELY(!may_steal
||
1474 pkts
[0]->source
!= DPBUF_DPDK
)) {
1475 struct netdev
*netdev
= &dev
->up
;
1477 dpdk_do_tx_copy(netdev
, qid
, pkts
, cnt
);
1480 for (i
= 0; i
< cnt
; i
++) {
1481 dp_packet_delete(pkts
[i
]);
1485 int next_tx_idx
= 0;
1487 unsigned int qos_pkts
= 0;
1488 unsigned int temp_cnt
= 0;
1490 for (i
= 0; i
< cnt
; i
++) {
1491 int size
= dp_packet_size(pkts
[i
]);
1493 if (OVS_UNLIKELY(size
> dev
->max_packet_len
)) {
1494 if (next_tx_idx
!= i
) {
1495 temp_cnt
= i
- next_tx_idx
;
1496 qos_pkts
= temp_cnt
;
1498 temp_cnt
= netdev_dpdk_qos_run__(dev
, (struct rte_mbuf
**)pkts
,
1500 dropped
+= qos_pkts
- temp_cnt
;
1501 dpdk_queue_pkts(dev
, qid
,
1502 (struct rte_mbuf
**)&pkts
[next_tx_idx
],
1507 VLOG_WARN_RL(&rl
, "Too big size %d max_packet_len %d",
1508 (int)size
, dev
->max_packet_len
);
1510 dp_packet_delete(pkts
[i
]);
1512 next_tx_idx
= i
+ 1;
1515 if (next_tx_idx
!= cnt
) {
1519 cnt
= netdev_dpdk_qos_run__(dev
, (struct rte_mbuf
**)pkts
, cnt
);
1520 dropped
+= qos_pkts
- cnt
;
1521 dpdk_queue_pkts(dev
, qid
, (struct rte_mbuf
**)&pkts
[next_tx_idx
],
1525 if (OVS_UNLIKELY(dropped
)) {
1526 rte_spinlock_lock(&dev
->stats_lock
);
1527 dev
->stats
.tx_dropped
+= dropped
;
1528 rte_spinlock_unlock(&dev
->stats_lock
);
1532 if (OVS_UNLIKELY(dev
->txq_needs_locking
)) {
1533 rte_spinlock_unlock(&dev
->tx_q
[qid
].tx_lock
);
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err, dpdk_mtu;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;
    uint32_t buf_size;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    buf_size = dpdk_buf_size(mtu);
    dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);

    mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_dropped += UINT64_MAX;

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;
    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
    stats->collisions = UINT64_MAX;

    stats->rx_length_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_missed_errors = rte_stats.imissed;

    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
*netdev_
,
1731 enum netdev_features
*current
,
1732 enum netdev_features
*advertised OVS_UNUSED
,
1733 enum netdev_features
*supported OVS_UNUSED
,
1734 enum netdev_features
*peer OVS_UNUSED
)
1736 struct netdev_dpdk
*dev
= netdev_dpdk_cast(netdev_
);
1737 struct rte_eth_link link
;
1739 ovs_mutex_lock(&dev
->mutex
);
1741 ovs_mutex_unlock(&dev
->mutex
);
1743 if (link
.link_duplex
== ETH_LINK_AUTONEG_DUPLEX
) {
1744 if (link
.link_speed
== ETH_LINK_SPEED_AUTONEG
) {
1745 *current
= NETDEV_F_AUTONEG
;
1747 } else if (link
.link_duplex
== ETH_LINK_HALF_DUPLEX
) {
1748 if (link
.link_speed
== ETH_LINK_SPEED_10
) {
1749 *current
= NETDEV_F_10MB_HD
;
1751 if (link
.link_speed
== ETH_LINK_SPEED_100
) {
1752 *current
= NETDEV_F_100MB_HD
;
1754 if (link
.link_speed
== ETH_LINK_SPEED_1000
) {
1755 *current
= NETDEV_F_1GB_HD
;
1757 } else if (link
.link_duplex
== ETH_LINK_FULL_DUPLEX
) {
1758 if (link
.link_speed
== ETH_LINK_SPEED_10
) {
1759 *current
= NETDEV_F_10MB_FD
;
1761 if (link
.link_speed
== ETH_LINK_SPEED_100
) {
1762 *current
= NETDEV_F_100MB_FD
;
1764 if (link
.link_speed
== ETH_LINK_SPEED_1000
) {
1765 *current
= NETDEV_F_1GB_FD
;
1767 if (link
.link_speed
== ETH_LINK_SPEED_10000
) {
1768 *current
= NETDEV_F_10GB_FD
;
1776 netdev_dpdk_get_ifindex(const struct netdev
*netdev
)
1778 struct netdev_dpdk
*dev
= netdev_dpdk_cast(netdev
);
1781 ovs_mutex_lock(&dev
->mutex
);
1782 ifindex
= dev
->port_id
;
1783 ovs_mutex_unlock(&dev
->mutex
);
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0)
        return ENODEV;

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if ( !strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < dev->virt_qp_nb; i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(dev, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(dev, idx + VIRTIO_TXQ, 0);
    }
}
/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and real_n_txq modifications.
 */
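/* For example (illustration only): with real_n_txq == 4 and only queues 0
 * and 2 left enabled by the guest, the disabled queues are remapped
 * round-robin onto the enabled ones, yielding the mapping
 * 0->0, 1->0, 2->2, 3->2. */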
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *netdev)
    OVS_REQUIRES(netdev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = netdev->real_n_txq;

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (netdev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (netdev->tx_q[i].map != i) {
            netdev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", netdev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, netdev->tx_q[i].map);
    }

    rte_free(enabled_queues);
}
static int
netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev, struct virtio_net *dev)
    OVS_REQUIRES(netdev->mutex)
{
    uint32_t qp_num;

    qp_num = dev->virt_qp_nb;
    if (qp_num > netdev->up.n_rxq) {
        VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
                 "too many queues %d > %d", dev->ifname, dev->device_fh,
                 qp_num, netdev->up.n_rxq);
        return -1;
    }

    netdev->real_n_rxq = qp_num;
    netdev->real_n_txq = qp_num;
    netdev->txq_needs_locking = true;
    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (netdev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        netdev->tx_q[0].map = 0;
    }

    netdev_dpdk_remap_txqs(netdev);

    return 0;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            if (netdev_dpdk_vhost_set_queues(netdev, dev)) {
                ovs_mutex_unlock(&netdev->mutex);
                ovs_mutex_unlock(&dpdk_mutex);
                return -1;
            }
            ovsrcu_set(&netdev->virtio_dev, dev);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            ovs_mutex_unlock(&netdev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
                  "found", dev->ifname, dev->device_fh);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", dev->ifname,
              dev->device_fh);
    return 0;
}
/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->real_n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            netdev_dpdk_txq_map_clear(vhost_dev);
            exists = true;
            ovs_mutex_unlock(&vhost_dev->mutex);
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists == true) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed", dev->ifname,
                  dev->device_fh);
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
                  dev->device_fh);
    }
}
static int
vring_state_changed(struct virtio_net *dev, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *vhost_dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, vhost_dev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&vhost_dev->mutex);
            if (enable) {
                vhost_dev->tx_q[qid].map = qid;
            } else {
                vhost_dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
            }
            netdev_dpdk_remap_txqs(vhost_dev);
            exists = true;
            ovs_mutex_unlock(&vhost_dev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' %"
                  PRIu64" changed to \'%s\'", queue_id, qid, dev->ifname,
                  dev->device_fh, (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
                  dev->device_fh);
        return -1;
    }

    return 0;
}
struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};
)
2235 pthread_detach(pthread_self());
2236 /* Put the cuse thread into quiescent state. */
2237 ovsrcu_quiesce_start();
2238 rte_vhost_driver_session_start();
static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
    return 0;
}
static int
dpdk_vhost_cuse_class_init(void)
{
    int err;

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);
    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    dpdk_vhost_class_init();
    return 0;
}
static int
dpdk_vhost_user_class_init(void)
{
    dpdk_vhost_class_init();
    return 0;
}
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}
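
/* The command registered above can then be driven from the shell, e.g.
 * (the port name "dpdk0" here is hypothetical):
 *
 *     ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 *     ovs-appctl netdev-dpdk/set-admin-state up
 *
 * where omitting the port name applies the state to every DPDK netdev. */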
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[RTE_RING_NAMESIZE];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err;

    /* Names always start with "dpdkr". */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings. */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
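/* Illustrative usage (hypothetical caller, added commentary): opening
 * "dpdkr0" parses the trailing "0" as the user port number and, on first
 * open, creates the rings "dpdkr0_tx" and "dpdkr0_rx" via
 * dpdk_ring_create() above. */
#if 0
static int
example_open_dpdkr0(unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
{
    return dpdk_ring_open("dpdkr0", eth_port_id);
}
#endif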
static int
netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_rss_invalidate(pkts[i]);
    }

    netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
}
/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations' qos_name to name.  Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match is found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }
    return NULL;
}
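/* For example, assuming the qos_confs table in this file lists
 * &egress_policer_ops, qos_lookup_name("egress-policer") returns that
 * pointer, while any unrecognized name falls through to NULL. */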
/*
 * Call qos_destruct to clean up items associated with the netdev's
 * qos_conf.  Set the netdev's qos_conf to NULL.
 */
static void
qos_delete_conf(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_spinlock_lock(&netdev->qos_lock);
    if (netdev->qos_conf) {
        if (netdev->qos_conf->ops->qos_destruct) {
            netdev->qos_conf->ops->qos_destruct(netdev_, netdev->qos_conf);
        }
        netdev->qos_conf = NULL;
    }
    rte_spinlock_unlock(&netdev->qos_lock);
}
static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }
    return 0;
}
static int
netdev_dpdk_get_qos(const struct netdev *netdev_,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error = 0;

    ovs_mutex_lock(&netdev->mutex);
    if (netdev->qos_conf) {
        *typep = netdev->qos_conf->ops->qos_name;
        error = (netdev->qos_conf->ops->qos_get
                 ? netdev->qos_conf->ops->qos_get(netdev_, details) : 0);
    }
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_set_qos(struct netdev *netdev_,
                    const char *type, const struct smap *details)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    const struct dpdk_qos_ops *new_ops = NULL;
    int error = 0;

    /* If type is empty or unsupported then the current QoS configuration
     * for the dpdk-netdev can be destroyed. */
    new_ops = qos_lookup_name(type);

    if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
        qos_delete_conf(netdev_);
        return EOPNOTSUPP;
    }

    ovs_mutex_lock(&netdev->mutex);

    if (netdev->qos_conf) {
        if (new_ops == netdev->qos_conf->ops) {
            error = new_ops->qos_set ? new_ops->qos_set(netdev_, details) : 0;
        } else {
            /* Delete existing QoS configuration. */
            qos_delete_conf(netdev_);
            ovs_assert(netdev->qos_conf == NULL);

            /* Install new QoS configuration. */
            error = new_ops->qos_construct(netdev_, details);
            ovs_assert((error == 0) == (netdev->qos_conf != NULL));
        }
    } else {
        error = new_ops->qos_construct(netdev_, details);
        ovs_assert((error == 0) == (netdev->qos_conf != NULL));
    }

    ovs_mutex_unlock(&netdev->mutex);
    return error;
}
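/* Illustrative configuration (added commentary; mirrors the OVS DPDK
 * install guide of this era rather than anything defined in this file):
 * attaching an egress-policer QoS record to a port is what drives
 * netdev_dpdk_set_qos() above:
 *
 *   ovs-vsctl set port vhost-user0 qos=@newqos -- \
 *       --id=@newqos create qos type=egress-policer \
 *       other-config:cir=46000000 other-config:cbs=2048
 */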
/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};
static struct egress_policer *
egress_policer_get__(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    return CONTAINER_OF(netdev->qos_conf, struct egress_policer, qos_conf);
}
static int
egress_policer_qos_construct(struct netdev *netdev_,
                             const struct smap *details)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    rte_spinlock_lock(&netdev->qos_lock);
    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    netdev->qos_conf = &policer->qos_conf;
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    rte_spinlock_unlock(&netdev->qos_lock);

    return err;
}
static void
egress_policer_qos_destruct(struct netdev *netdev_ OVS_UNUSED,
                            struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}
static int
egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
{
    struct egress_policer *policer = egress_policer_get__(netdev);

    smap_add_format(details, "cir", "%llu",
                    1ULL * policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%llu",
                    1ULL * policer->app_srtcm_params.cbs);
    return 0;
}
static int
egress_policer_qos_set(struct netdev *netdev_, const struct smap *details)
{
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    policer = egress_policer_get__(netdev_);
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);

    return err;
}
static inline bool
egress_policer_pkt_handle__(struct rte_meter_srtcm *meter,
                            struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
           e_RTE_METER_GREEN;
}
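/* Background note (added commentary): the check above is a color-blind
 * single-rate three-color marker check (srTCM, RFC 2697).  A packet is
 * "green" while the byte rate stays within the configured cir (bytes per
 * second) and the committed burst of cbs bytes; because ebs is set to 0
 * by the construct/set handlers above, every non-green packet is dropped
 * by egress_policer_run() below. */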
static int
egress_policer_run(struct netdev *netdev_, struct rte_mbuf **pkts,
                   int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct egress_policer *policer = egress_policer_get__(netdev_);
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle the current packet. */
        if (egress_policer_pkt_handle__(&policer->egress_meter, pkt,
                                        current_time)) {
            /* Packet is within the rate: compact it into the surviving
             * prefix of the array. */
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}
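/* Worked example (added commentary): given pkts = [A, B, C] where only B
 * exceeds the meter, A stays at index 0, C is compacted down to index 1,
 * B is freed, and the function returns 2, so the caller transmits only
 * the surviving prefix of the array. */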
static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_set,
    egress_policer_run
};
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)          \
{                                                             \
    NAME,                                                     \
    true,                       /* is_pmd */                  \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    netdev_dpdk_set_config,                                   \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    netdev_dpdk_get_qos_types,                                \
    NULL,                       /* get_qos_capabilities */    \
    netdev_dpdk_get_qos,                                      \
    netdev_dpdk_set_qos,                                      \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_addr_list */           \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
static bool
process_vhost_flags(char *flag, char *default_val, int size,
                    char **argv, char **new_val)
{
    bool changed = false;

    /* Depending on which version of vhost is in use, process the
     * vhost-specific flag if it is provided on the vswitchd command line;
     * otherwise resort to a default value.
     *
     * For vhost-user: Process "-vhost_sock_dir" to set the custom location
     * of the vhost-user socket(s).
     * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of
     * the vhost-cuse character device. */
    if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
        changed = true;
        *new_val = xstrdup(argv[2]);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
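/* Example (added commentary): with a vswitchd command line of
 *     ovs-vswitchd --dpdk -vhost_sock_dir /tmp/sockets ...
 * then, after dpdk_init() below strips the "--dpdk" token, argv[1] is
 * "-vhost_sock_dir" and argv[2] is "/tmp/sockets", so the user-provided
 * directory is duplicated into *new_val and the function returns true. */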
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    int i;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from the arg list. */
    argc--;
    argv++;

    /* Reject the --user option. */
    for (i = 0; i < argc; i++) {
        if (!strcmp(argv[i], "--user")) {
            VLOG_ERR("Can not mix --dpdk and --user options, aborting.");
        }
    }

#ifdef VHOST_CUSE
    if (process_vhost_flags("-cuse_dev_name", xstrdup("vhost-net"),
                            PATH_MAX, argv, &cuse_dev_name)) {
#else
    if (process_vhost_flags("-vhost_sock_dir", xstrdup(ovs_rundir()),
                            NAME_MAX, argv, &vhost_sock_dir)) {
        struct stat s;
        int err;

        err = stat(vhost_sock_dir, &s);
        if (err) {
            VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
                     vhost_sock_dir);
            return err;
        }
#endif
        /* Remove the vhost flag configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function. */
        argc -= 2;
        argv += 2;    /* Increment by two to bypass the vhost flag arguments. */
        base = 2;
    }

    /* Keep the program name argument as this is needed for the call to
     * rte_eal_init(). */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here. */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    return result + 1 + base;
}
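/* Worked example (added commentary): for
 *     ovs-vswitchd --dpdk -c 0x1 -n 4 -- unix:$DB_SOCK ...
 * no vhost flag pair is present, so base stays 0.  rte_eal_init() parses
 * "-c 0x1 -n 4" and returns how many arguments it consumed; the final
 * result + 1 + base tells the caller how many leading argv entries
 * (including "--dpdk" and any vhost flag pair) to skip. */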
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_cuse_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,                       /* get_features */
        NULL,                       /* get_status */
        netdev_dpdk_vhost_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,                       /* get_features */
        NULL,                       /* get_status */
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
        netdev_register_provider(&dpdk_vhost_cuse_class);
#else
        netdev_register_provider(&dpdk_vhost_user_class);
#endif
        ovsthread_once_done(&once);
    }
}
int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non-pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}
bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}