/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "ofp-print.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "openvswitch/vlog.h"
#include "rte_config.h"
#include "rte_meter.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) - ETHER_HDR_LEN - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu) (MTU_TO_MAX_FRAME_LEN(mtu)   \
                        + sizeof(struct dp_packet)  \
                        + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN 1024
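
/* Worked example (illustrative, not from the original source; assumes the
 * usual sizes ETHER_HDR_LEN 14, ETHER_CRC_LEN 4, VLAN_HEADER_LEN 4): for
 * mtu = 1500, MTU_TO_FRAME_LEN(1500) = 1518 while MTU_TO_MAX_FRAME_LEN(1500)
 * = 1500 + 14 + 4 + 2*4 = 1526, so MBUF_SIZE(1500) is 1526 bytes of frame
 * space plus sizeof(struct dp_packet) of metadata plus RTE_PKTMBUF_HEADROOM. */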
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);
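
/* Illustrative note (not from the original source): with the values above,
 * the halving sequence tried by the mempool allocation loop is 262144,
 * 131072, 65536, 32768 and finally 16384 (= MIN_NB_MBUF).  The assertion
 * above guarantees that every step of that sequence is a whole number. */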
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

#define OVS_VHOST_MAX_QUEUE_NUM 1024  /* Maximum number of vHost TX queues. */
static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */

/*
 * Maximum amount of time in microseconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Quality of Service */

/* An instance of a QoS configuration.  Always associated with a particular
 * network device.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct the QoS implementation on 'netdev'. The
     * implementation should make the appropriate calls to configure QoS
     * according to 'details'. The implementation may assume that any current
     * QoS configuration already installed should be destroyed before
     * constructing the new configuration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets 'netdev->qos_conf'
     * to an initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_construct)(struct netdev *netdev, const struct smap *details);

    /* Destroys the data structures allocated by the implementation as part of
     * 'netdev->qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);

    /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)). */
    int (*qos_get)(const struct netdev *netdev, struct smap *details);

    /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
     * required calls to complete the reconfiguration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function may be null if 'qos_conf' is not configurable. */
    int (*qos_set)(struct netdev *netdev, const struct smap *details);

    /* Modify an array of rte_mbufs. The modification is specific to
     * each QoS implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a QoS modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
                   int pkt_cnt);
};
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};
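
/* Hypothetical sketch (not part of the original file): a new QoS
 * implementation would define its own ops table, e.g.
 *     static const struct dpdk_qos_ops my_policer_ops;
 * provide at least qos_construct, qos_destruct and qos_run, and add
 * &my_policer_ops to qos_confs[] before the terminating NULL, which
 * qos_lookup_name() relies on as its end-of-array sentinel. */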
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time
                                    * pkts are queued. */
    int count;
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    int map;                       /* Mapping of configured vhost-user queues
                                    * to those enabled by the guest. */
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* dpdk has no way to remove dpdk ring ethernet devices
   so we have to keep them around once they've been created
*/
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;           /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has. We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission */
    int real_n_txq;
    int real_n_rxq;
    bool txq_needs_locking;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    struct qos_conf *qos_conf;
    rte_spinlock_t qos_lock;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                     NETDEV_DPDK_MBUF_ALIGN);
}
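
/* Worked example (illustrative, not from the original source; assumes the
 * common RTE_PKTMBUF_HEADROOM of 128 bytes): dpdk_buf_size(1500) =
 * ROUND_UP(1526 + 128, 1024) = ROUND_UP(1654, 1024) = 2048, i.e. the
 * smallest 1K-aligned buffer that holds a maximum-size frame plus headroom. */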
/* XXX: use dpdk malloc for entire OVS. in fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;
    struct rte_pktmbuf_pool_private mbp_priv;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof (struct dp_packet) - sizeof (struct rte_mbuf);

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
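
/* Naming example (illustrative, not from the original source): a mempool for
 * MTU 1500 on NUMA socket 0 is first requested as "ovs_mp_1500_0_262144";
 * if that allocation fails with ENOMEM the loop above retries as
 * "ovs_mp_1500_0_131072" and so on, halving until MIN_NB_MBUF. */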
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request fewer queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
        if (diag) {
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->real_n_txq = n_txq;

        return 0;
    }

    return diag;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *netdev = dpdk_rte_mzalloc(sizeof *netdev);
    return &netdev->up;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
{
    unsigned i;

    netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!netdev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core. If the corresponding core
             * is not on the same numa node as 'netdev', flags the
             * 'flush_tx'. */
            netdev->tx_q[i].flush_tx = netdev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs. Always flush */
            netdev->tx_q[i].flush_tx = true;
        }

        /* Initialize map for vhost devices. */
        netdev->tx_q[i].map = -1;
        rte_spinlock_init(&netdev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev_, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int sid;
    int err = 0;
    uint32_t buf_size;

    ovs_mutex_init(&netdev->mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_spinlock_init(&netdev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    netdev->socket_id = sid < 0 ? SOCKET0 : sid;
    netdev->port_id = port_no;
    netdev->type = type;
    netdev->mtu = ETHER_MTU;
    netdev->max_packet_len = MTU_TO_FRAME_LEN(netdev->mtu);

    buf_size = dpdk_buf_size(netdev->mtu);
    netdev->dpdk_mp = dpdk_mp_get(netdev->socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!netdev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    /* Initialise QoS configuration to NULL and qos lock to unlocked */
    netdev->qos_conf = NULL;
    rte_spinlock_init(&netdev->qos_lock);

    netdev_->n_txq = NR_QUEUE;
    netdev_->n_rxq = NR_QUEUE;
    netdev_->requested_n_rxq = NR_QUEUE;
    netdev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(netdev, NR_QUEUE);
        err = dpdk_eth_dev_init(netdev);
        if (err) {
            goto unlock;
        }
    } else {
        netdev_dpdk_alloc_txq(netdev, OVS_VHOST_MAX_QUEUE_NUM);
    }

    list_push_back(&dpdk_list, &netdev->list_node);

unlock:
    if (err) {
        rte_free(netdev->tx_q);
    }
    ovs_mutex_unlock(&netdev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
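
/* Usage example (illustrative, not from the original source):
 * dpdk_dev_parse_name("dpdk7", "dpdk", &port_no) returns 0 and sets
 * port_no to 7, whereas "dpdk-7" fails because str_to_uint() rejects the
 * leading '-', as the comment above requires. */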
static int
vhost_construct_helper(struct netdev *netdev_) OVS_REQUIRES(dpdk_mutex)
{
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    return netdev_dpdk_init(netdev_, -1, DPDK_DEV_VHOST);
}
static int
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(netdev->vhost_id, netdev->up.name, sizeof(netdev->vhost_id));
    err = vhost_construct_helper(netdev_);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    const char *name = netdev_->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location where
     * the socket is to be created, then register the socket. */
    snprintf(netdev->vhost_id, sizeof(netdev->vhost_id), "%s/%s",
             vhost_sock_dir, name);

    err = rte_vhost_driver_register(netdev->vhost_id);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 netdev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(netdev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  netdev->vhost_id, name);
        err = vhost_construct_helper(netdev_);
    }

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev_->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on socket"
                 " '%s' must be restarted.",
                 dev->vhost_id);
    }

    if (rte_vhost_driver_unregister(dev->vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
    } else {
        fatal_signal_remove_file_to_unlink(dev->vhost_id);
    }

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_free(netdev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
                                               netdev->requested_n_rxq), 1);
    netdev_change_seq_changed(netdev);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    return netdev->socket_id;
}
/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try restoring its old configuration
 * and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;
    int old_rxq, old_txq;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    rte_eth_dev_stop(netdev->port_id);

    old_txq = netdev->up.n_txq;
    old_rxq = netdev->up.n_rxq;
    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    rte_free(netdev->tx_q);
    err = dpdk_eth_dev_init(netdev);
    netdev_dpdk_alloc_txq(netdev, netdev->real_n_txq);
    if (err) {
        /* If there has been an error, it means that the requested queues
         * have not been created. Restore the old numbers. */
        netdev->up.n_txq = old_txq;
        netdev->up.n_rxq = old_rxq;
    }

    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                                  unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->real_n_txq = 1;
    netdev->up.n_rxq = 1;
    netdev->txq_needs_locking = netdev->real_n_txq != netdev->up.n_txq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev_, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int err = 0;

    if (netdev->up.n_txq == n_txq && netdev->up.n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&netdev->mutex);

    netdev->up.n_txq = n_txq;
    netdev->up.n_rxq = n_rxq;

    ovs_mutex_unlock(&netdev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rx)
{
    return CONTAINER_OF(rx, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev_dpdk *netdev = netdev_dpdk_cast(rx->up.netdev);

    ovs_mutex_lock(&netdev->mutex);
    rx->port_id = netdev->port_id;
    ovs_mutex_unlock(&netdev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq_ OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq_)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}
static bool
is_vhost_running(struct virtio_net *dev)
{
    return (dev != NULL && (dev->flags & VIRTIO_DEV_RUNNING));
}
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count)
{
    int i;
    struct dp_packet *packet;

    stats->rx_packets += count;
    for (i = 0; i < count; i++) {
        packet = packets[i];

        if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += dp_packet_size(packet);
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq_,
                           struct dp_packet **packets, int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    int qid = rxq_->queue_id;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    if (rxq_->queue_id >= vhost_dev->real_n_rxq) {
        return EOPNOTSUPP;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    vhost_dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    rte_spinlock_lock(&vhost_dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&vhost_dev->stats, packets, nb_rx);
    rte_spinlock_unlock(&vhost_dev->stats_lock);

    *c = (int) nb_rx;
    return 0;
}
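
/* Queue numbering note (illustrative, not from the original source): virtio
 * rings come in RX/TX pairs, so with VIRTIO_QNUM == 2 the guest TX ring that
 * backs OVS rxq 'qid' is qid * VIRTIO_QNUM + VIRTIO_TXQ; e.g. rxq 1 dequeues
 * from virtio ring 3. */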
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq_, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq_);
    struct netdev *netdev = rx->up.netdev;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues.
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed */
    if (rxq_->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq_->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq_->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}
static inline int
netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                      int cnt)
{
    struct netdev *netdev = &dev->up;

    if (dev->qos_conf != NULL) {
        rte_spinlock_lock(&dev->qos_lock);
        if (dev->qos_conf != NULL) {
            cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
        }
        rte_spinlock_unlock(&dev->qos_lock);
    }

    return cnt;
}
static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt,
                         bool may_steal)
{
    struct netdev_dpdk *vhost_dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(vhost_dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int qos_pkts = cnt;
    uint64_t start = 0;

    qid = vhost_dev->tx_q[qid % vhost_dev->real_n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid == -1)) {
        rte_spinlock_lock(&vhost_dev->stats_lock);
        vhost_dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&vhost_dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&vhost_dev->tx_q[qid].tx_lock);

    /* Check whether QoS has been configured for the netdev */
    cnt = netdev_dpdk_qos_run__(vhost_dev, cur_pkts, cnt);
    qos_pkts -= cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible next iteration.*/
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
            unsigned int expired = 0;

            if (!start) {
                start = rte_get_timer_cycles();
            }

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
                    expired = 1;
                    break;
                }
            }
            if (expired) {
                /* break out of main loop. */
                break;
            }
        }
    } while (cnt);

    rte_spinlock_unlock(&vhost_dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&vhost_dev->stats_lock);
    cnt += qos_pkts;
    netdev_dpdk_vhost_update_tx_counters(&vhost_dev->stats, pkts, total_pkts,
                                         cnt);
    rte_spinlock_unlock(&vhost_dev->stats_lock);

out:
    if (may_steal) {
        int i;

        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
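
/* Retry budget arithmetic (illustrative, not from the original source): the
 * timeout above converts VHOST_ENQ_RETRY_USECS (100 us) into timer cycles;
 * on a hypothetical 2.4 GHz timer that is 100 * 2.4e9 / 1e6 = 240,000 cycles
 * of spinning on a full vring before the send loop gives up. */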
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
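
/* Drain policy arithmetic (illustrative, not from the original source):
 * DRAIN_TSC is 200,000 timer cycles, so on a 2 GHz TSC a partially filled
 * queue is force-flushed after roughly 100 us even if it never reaches
 * MAX_TX_QUEUE_LEN (384) packets. */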
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int)size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *), dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs, newcnt, true);
    } else {
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);

        dropped += qos_pkts - newcnt;
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
                       int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;
        unsigned int qos_pkts = 0;
        unsigned int temp_cnt = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    temp_cnt = i - next_tx_idx;
                    qos_pkts = temp_cnt;

                    temp_cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf **)pkts,
                                                     temp_cnt);
                    dropped += qos_pkts - temp_cnt;
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    temp_cnt);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int)size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            cnt -= next_tx_idx;
            qos_pkts = cnt;

            cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf **)pkts, cnt);
            dropped += qos_pkts - cnt;
            dpdk_queue_pkts(dev, qid, (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt);
        }

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err, dpdk_mtu;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;
    uint32_t buf_size;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    buf_size = dpdk_buf_size(mtu);
    dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);

    mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    memset(stats, 0, sizeof(*stats));
    /* Unsupported Stats */
    stats->collisions = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_missed_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;
    stats->rx_dropped += UINT64_MAX;

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;
    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_stats_get(dev->port_id, &rte_stats);

    memset(stats, 0, sizeof(*stats));

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
    stats->collisions = UINT64_MAX;

    stats->rx_length_errors = UINT64_MAX;
    stats->rx_over_errors = UINT64_MAX;
    stats->rx_crc_errors = UINT64_MAX;
    stats->rx_frame_errors = UINT64_MAX;
    stats->rx_fifo_errors = UINT64_MAX;
    stats->rx_missed_errors = rte_stats.imissed;

    stats->tx_aborted_errors = UINT64_MAX;
    stats->tx_carrier_errors = UINT64_MAX;
    stats->tx_fifo_errors = UINT64_MAX;
    stats->tx_heartbeat_errors = UINT64_MAX;
    stats->tx_window_errors = UINT64_MAX;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev_,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_AUTONEG_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_AUTONEG) {
            *current = NETDEV_F_AUTONEG;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_LINK_SPEED_10) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_100) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_1000) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_LINK_SPEED_10000) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev_, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev_)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev_ OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev_,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error;

    ovs_mutex_lock(&netdev->mutex);
    error = netdev_dpdk_update_flags__(netdev, off, on, old_flagsp);
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev_, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev_);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u", dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%u",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *dev)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < dev->virt_qp_nb; i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(dev, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(dev, idx + VIRTIO_TXQ, 0);
    }
}
/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and real_n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *netdev)
    OVS_REQUIRES(netdev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = netdev->real_n_txq;

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (netdev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = -1;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (netdev->tx_q[i].map != i) {
            netdev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", netdev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, netdev->tx_q[i].map);
    }

    rte_free(enabled_queues);
}
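
/* Worked example (illustrative, not from the original source): with
 * real_n_txq == 4 and only guest queues 0 and 2 enabled (map == {0, -1, 2,
 * -1} on entry), enabled_queues becomes {0, 2} and the round-robin pass
 * above rewrites the disabled slots, leaving map == {0, 0, 2, 2}. */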
static int
netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev, struct virtio_net *dev)
    OVS_REQUIRES(netdev->mutex)
{
    uint32_t qp_num;

    qp_num = dev->virt_qp_nb;
    if (qp_num > netdev->up.n_rxq) {
        VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
                 "too many queues %d > %d", dev->ifname, dev->device_fh,
                 qp_num, netdev->up.n_rxq);
        return -1;
    }

    netdev->real_n_rxq = qp_num;
    netdev->real_n_txq = qp_num;
    netdev->txq_needs_locking = true;

    netdev_dpdk_remap_txqs(netdev);

    return 0;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *dev)
{
    struct netdev_dpdk *netdev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(netdev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, netdev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&netdev->mutex);
            if (netdev_dpdk_vhost_set_queues(netdev, dev)) {
                ovs_mutex_unlock(&netdev->mutex);
                ovs_mutex_unlock(&dpdk_mutex);
                return -1;
            }
            ovsrcu_set(&netdev->virtio_dev, dev);
            exists = true;
            dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(dev);
            ovs_mutex_unlock(&netdev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
                  "found", dev->ifname, dev->device_fh);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", dev->ifname,
              dev->device_fh);
    return 0;
}
/*
 * Remove a virtio-net device from the specific vhost port. Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *dev)
{
    struct netdev_dpdk *vhost_dev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(vhost_dev) == dev) {

            ovs_mutex_lock(&vhost_dev->mutex);
            dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&vhost_dev->virtio_dev, NULL);
            exists = true;
            ovs_mutex_unlock(&vhost_dev->mutex);
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists == true) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed", dev->ifname,
                  dev->device_fh);
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
                  dev->device_fh);
    }
}
static int
vring_state_changed(struct virtio_net *dev, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *vhost_dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (vhost_dev, list_node, &dpdk_list) {
        if (strncmp(dev->ifname, vhost_dev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&vhost_dev->mutex);
            if (enable) {
                vhost_dev->tx_q[qid].map = qid;
            } else {
                vhost_dev->tx_q[qid].map = -1;
            }
            netdev_dpdk_remap_txqs(vhost_dev);
            exists = true;
            ovs_mutex_unlock(&vhost_dev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' %"
                  PRIu64" changed to \'%s\'", queue_id, qid, dev->ifname,
                  dev->device_fh, (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", dev->ifname,
                  dev->device_fh);
        return -1;
    }

    return 0;
}
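
/* Index arithmetic note (illustrative, not from the original source):
 * 'queue_id' is a flat virtio ring index, so queue_id / VIRTIO_QNUM is the
 * queue-pair number and queue_id % VIRTIO_QNUM distinguishes RX from TX;
 * e.g. queue_id 5 is the TX ring of pair 2 and is skipped by the early
 * return at the top of vring_state_changed(). */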
struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};
static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
     pthread_detach(pthread_self());
     /* Put the cuse thread into quiescent state. */
     ovsrcu_quiesce_start();
     rte_vhost_driver_session_start();
     return NULL;
}
static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
    return 0;
}
static int
dpdk_vhost_cuse_class_init(void)
{
    int err = 0;

    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified on the vswitchd command line, cuse_dev_name
     * is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);
    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return -1;
    }

    dpdk_vhost_class_init();
    return 0;
}
static int
dpdk_vhost_user_class_init(void)
{
    dpdk_vhost_class_init();
    return 0;
}
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
}
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[RTE_RING_NAMESIZE];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
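
/* Naming example (illustrative, not from the original source): creating port
 * "dpdkr0" produces the two fixed-size rings "dpdkr0_tx" and "dpdkr0_rx"
 * (DPDK_RING_SIZE == 256 slots each), which rte_eth_from_rings() then wraps
 * in a single ethdev. */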
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
static int
netdev_dpdk_ring_send(struct netdev *netdev_, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that the
     * rss hash field is clear. This is because the same mbuf may be modified by
     * the consumer of the ring and return into the datapath without recalculating
     * the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_rss_invalidate(pkts[i]);
    }

    netdev_dpdk_send__(netdev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
}
/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations qos_name to name.  Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match is found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }
    return NULL;
}
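
/* Extending QoS is a matter of adding another entry to the qos_confs table
 * that qos_lookup_name() walks.  A minimal sketch (hypothetical "my-shaper"
 * ops; not part of this file):
 *
 *   static const struct dpdk_qos_ops my_shaper_ops = {
 *       "my-shaper",            // qos_name
 *       my_shaper_qos_construct,
 *       my_shaper_qos_destruct,
 *       my_shaper_qos_get,
 *       my_shaper_qos_set,
 *   };
 *
 * ...and listing &my_shaper_ops before the NULL terminator of qos_confs. */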
/*
 * Call qos_destruct to clean up items associated with the netdev's
 * qos_conf.  Set the netdev's qos_conf to NULL.
 */
static void
qos_delete_conf(struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);

    rte_spinlock_lock(&netdev->qos_lock);
    if (netdev->qos_conf) {
        if (netdev->qos_conf->ops->qos_destruct) {
            netdev->qos_conf->ops->qos_destruct(netdev_, netdev->qos_conf);
        }
        netdev->qos_conf = NULL;
    }
    rte_spinlock_unlock(&netdev->qos_lock);
}
static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }
    return 0;
}
static int
netdev_dpdk_get_qos(const struct netdev *netdev_,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    int error = 0;

    ovs_mutex_lock(&netdev->mutex);
    if (netdev->qos_conf) {
        *typep = netdev->qos_conf->ops->qos_name;
        error = (netdev->qos_conf->ops->qos_get
                 ? netdev->qos_conf->ops->qos_get(netdev_, details) : 0);
    }
    ovs_mutex_unlock(&netdev->mutex);

    return error;
}
static int
netdev_dpdk_set_qos(struct netdev *netdev_,
                    const char *type, const struct smap *details)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    const struct dpdk_qos_ops *new_ops = NULL;
    int error = 0;

    /* If type is empty or unsupported, then the current QoS configuration
     * for the dpdk-netdev can be destroyed. */
    new_ops = qos_lookup_name(type);

    if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
        qos_delete_conf(netdev_);
        return EOPNOTSUPP;
    }

    ovs_mutex_lock(&netdev->mutex);

    if (netdev->qos_conf) {
        if (new_ops == netdev->qos_conf->ops) {
            error = new_ops->qos_set ? new_ops->qos_set(netdev_, details) : 0;
        } else {
            /* Delete existing QoS configuration. */
            qos_delete_conf(netdev_);
            ovs_assert(netdev->qos_conf == NULL);

            /* Install new QoS configuration. */
            error = new_ops->qos_construct(netdev_, details);
            ovs_assert((error == 0) == (netdev->qos_conf != NULL));
        }
    } else {
        error = new_ops->qos_construct(netdev_, details);
        ovs_assert((error == 0) == (netdev->qos_conf != NULL));
    }

    ovs_mutex_unlock(&netdev->mutex);
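    return error;
}

/* Illustrative configuration (values are examples only): attaching the
 * egress-policer defined below to a port goes through this function, e.g.:
 *
 *   ovs-vsctl set port vhost-user0 qos=@newqos -- \
 *       --id=@newqos create qos type=egress-policer \
 *       other-config:cir=46000000 other-config:cbs=2048
 *
 * An empty or unrecognized type instead deletes the port's current QoS
 * configuration via qos_delete_conf(). */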
/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};
static struct egress_policer *
egress_policer_get__(const struct netdev *netdev_)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    return CONTAINER_OF(netdev->qos_conf, struct egress_policer, qos_conf);
}
static int
egress_policer_qos_construct(struct netdev *netdev_,
                             const struct smap *details)
{
    struct netdev_dpdk *netdev = netdev_dpdk_cast(netdev_);
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    rte_spinlock_lock(&netdev->qos_lock);
    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    netdev->qos_conf = &policer->qos_conf;
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    rte_spinlock_unlock(&netdev->qos_lock);

    return err;
}
static void
egress_policer_qos_destruct(struct netdev *netdev_ OVS_UNUSED,
                            struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}
static int
egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
{
    struct egress_policer *policer = egress_policer_get__(netdev);

    smap_add_format(details, "cir", "%llu",
                    1ULL * policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%llu",
                    1ULL * policer->app_srtcm_params.cbs);
    return 0;
}
static int
egress_policer_qos_set(struct netdev *netdev_, const struct smap *details)
{
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    policer = egress_policer_get__(netdev_);
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);

    return err;
}
static inline bool
egress_policer_pkt_handle__(struct rte_meter_srtcm *meter,
                            struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
           e_RTE_METER_GREEN;
}
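
/* Worked example (illustrative values): with cir=46000000 and cbs=2048 the
 * srTCM meter sustains 46,000,000 bytes/sec (368 Mbit/s) with bursts of up
 * to 2048 bytes.  Since pkt_len above excludes the Ethernet header, a
 * 1518-byte frame is metered as 1504 bytes; frames that would overdraw the
 * committed bucket are colored non-green and dropped by
 * egress_policer_run() below. */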
static int
egress_policer_run(struct netdev *netdev_, struct rte_mbuf **pkts,
                   int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct egress_policer *policer = egress_policer_get__(netdev_);
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet: keep it if the meter says green, compacting
         * survivors to the front of the array; otherwise drop it. */
        if (egress_policer_pkt_handle__(&policer->egress_meter, pkt,
                                        current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}
static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_set,
};
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND,  \
                          GET_CARRIER, GET_STATS, GET_FEATURES,           \
                          GET_STATUS, RXQ_RECV)                           \
{                                                                         \
    NAME,                                                                 \
    true,                       /* is_pmd */                              \
    INIT,                       /* init */                                \
    NULL,                       /* netdev_dpdk_run */                     \
    NULL,                       /* netdev_dpdk_wait */                    \
                                                                          \
    netdev_dpdk_alloc,                                                    \
    CONSTRUCT,                                                            \
    DESTRUCT,                                                             \
    netdev_dpdk_dealloc,                                                  \
    netdev_dpdk_get_config,                                               \
    netdev_dpdk_set_config,                                               \
    NULL,                       /* get_tunnel_config */                   \
    NULL,                       /* build header */                        \
    NULL,                       /* push header */                         \
    NULL,                       /* pop header */                          \
    netdev_dpdk_get_numa_id,    /* get_numa_id */                         \
    MULTIQ,                     /* set_multiq */                          \
                                                                          \
    SEND,                       /* send */                                \
    NULL,                       /* send_wait */                           \
                                                                          \
    netdev_dpdk_set_etheraddr,                                            \
    netdev_dpdk_get_etheraddr,                                            \
    netdev_dpdk_get_mtu,                                                  \
    netdev_dpdk_set_mtu,                                                  \
    netdev_dpdk_get_ifindex,                                              \
    GET_CARRIER,                /* get_carrier */                         \
    netdev_dpdk_get_carrier_resets,                                       \
    netdev_dpdk_set_miimon,                                               \
    GET_STATS,                  /* get_stats */                           \
    GET_FEATURES,               /* get_features */                        \
    NULL,                       /* set_advertisements */                  \
                                                                          \
    NULL,                       /* set_policing */                        \
    netdev_dpdk_get_qos_types,                                            \
    NULL,                       /* get_qos_capabilities */                \
    netdev_dpdk_get_qos,                                                  \
    netdev_dpdk_set_qos,                                                  \
    NULL,                       /* get_queue */                           \
    NULL,                       /* set_queue */                           \
    NULL,                       /* delete_queue */                        \
    NULL,                       /* get_queue_stats */                     \
    NULL,                       /* queue_dump_start */                    \
    NULL,                       /* queue_dump_next */                     \
    NULL,                       /* queue_dump_done */                     \
    NULL,                       /* dump_queue_stats */                    \
                                                                          \
    NULL,                       /* set_in4 */                             \
    NULL,                       /* get_addr_list */                       \
    NULL,                       /* add_router */                          \
    NULL,                       /* get_next_hop */                        \
    GET_STATUS,                 /* get_status */                          \
    NULL,                       /* arp_lookup */                          \
                                                                          \
    netdev_dpdk_update_flags,                                             \
                                                                          \
    netdev_dpdk_rxq_alloc,                                                \
    netdev_dpdk_rxq_construct,                                            \
    netdev_dpdk_rxq_destruct,                                             \
    netdev_dpdk_rxq_dealloc,                                              \
    RXQ_RECV,                   /* rxq_recv */                            \
    NULL,                       /* rx_wait */                             \
    NULL,                       /* rxq_drain */                           \
}
static bool
process_vhost_flags(char *flag, char *default_val, int size,
                    char **argv, char **new_val)
{
    bool changed = false;

    /* Depending on which version of vhost is in use, process the vhost-
     * specific flag if it is provided on the vswitchd command line,
     * otherwise resort to a default value.
     *
     * For vhost-user: Process "-vhost_sock_dir" to set the custom location
     * of the vhost-user socket(s).
     * For vhost-cuse: Process "-cuse_dev_name" to set the custom name of the
     * vhost-cuse character device.
     */
    if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
        changed = true;
        *new_val = xstrdup(argv[2]);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
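
/* Illustrative invocations (flag values are examples only):
 *
 *   ovs-vswitchd --dpdk -vhost_sock_dir /tmp ...        # sockets under /tmp
 *   ovs-vswitchd --dpdk -cuse_dev_name my-vhost-net ... # /dev/my-vhost-net
 *
 * With no flag present, the logged default (ovs_rundir() or "vhost-net")
 * is used instead. */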
int
dpdk_init(int argc, char **argv)
{
    int result;
    int base = 0;
    int i;
    char *program_name = argv[0];

    if (argc < 2 || strcmp(argv[1], "--dpdk")) {
        return 0;
    }

    /* Remove the --dpdk argument from the arg list. */
    argc--;
    argv++;

    /* Reject --user option. */
    for (i = 0; i < argc; i++) {
        if (!strcmp(argv[i], "--user")) {
            VLOG_ERR("Can not mix --dpdk and --user options, aborting.");
        }
    }

#ifdef VHOST_CUSE
    if (process_vhost_flags("-cuse_dev_name", xstrdup("vhost-net"),
                            PATH_MAX, argv, &cuse_dev_name)) {
#else
    if (process_vhost_flags("-vhost_sock_dir", xstrdup(ovs_rundir()),
                            NAME_MAX, argv, &vhost_sock_dir)) {
        struct stat s;
        int err;

        err = stat(vhost_sock_dir, &s);
        if (err) {
            VLOG_ERR("vHostUser socket DIR '%s' does not exist.",
                     vhost_sock_dir);
            return err;
        }
#endif
        /* Remove the vhost flag configuration parameters from the argument
         * list, so that the correct elements are passed to the DPDK
         * initialization function.
         */
        argc -= 2;
        argv += 2;  /* Increment by two to bypass the vhost flag arguments. */
        base = 2;
    }

    /* Keep the program name argument as this is needed for the call to
     * rte_eal_init().
     */
    argv[0] = program_name;

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    if (argc > result) {
        argv[result] = argv[0];
    }

    /* We are called from the main thread here. */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    return result + 1 + base;
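}

/* Illustrative full command line (arguments are examples only):
 *
 *   ovs-vswitchd --dpdk -c 0x1 -n 4 --socket-mem 1024 \
 *       -- unix:$DB_SOCK --pidfile --detach
 *
 * Everything between "--dpdk" and "--" is consumed here and handed to
 * rte_eal_init(); the return value tells the caller how many arguments were
 * eaten so the rest can go to the normal OVS option parser. */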
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_cuse_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (rte_eal_init_ret) {
        return;
    }

    if (ovsthread_once_start(&once)) {
        dpdk_common_init();
        netdev_register_provider(&dpdk_class);
        netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
        netdev_register_provider(&dpdk_vhost_cuse_class);
#else
        netdev_register_provider(&dpdk_vhost_user_class);
#endif
        ovsthread_once_done(&once);
    }
}
int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;
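    return 0;
}

/* Typical (illustrative) call pattern: a pmd thread pins itself to its
 * assigned core before entering the polling loop, e.g.:
 *
 *   if (!pmd_thread_setaffinity_cpu(core_id)) {
 *       for (;;) {
 *           ...poll rx queues...
 *       }
 *   }
 *
 * The call also sets RTE_PER_LCORE(_lcore_id), which is what makes
 * dpdk_thread_is_pmd() below return true on this thread. */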
bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}