/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "unaligned.h"

#include "rte_config.h"
#include "rte_meter.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (ETHER_HDR_LEN + ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) - ETHER_HDR_LEN \
                                     - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              (MTU_TO_MAX_FRAME_LEN(mtu)   \
                                     + sizeof(struct dp_packet)  \
                                     + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN      1024
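
/* Worked example (added for illustration; assumes the usual DPDK values of
 * a 14-byte Ethernet header, 4-byte CRC and 4-byte VLAN tag): a standard
 * 1500-byte MTU yields a 1518-byte frame, and a 1526-byte maximum frame once
 * two VLAN tags are accounted for.  The checks below encode this at compile
 * time. */
BUILD_ASSERT_DECL(MTU_TO_FRAME_LEN(1500) == 1518);
BUILD_ASSERT_DECL(MTU_TO_MAX_FRAME_LEN(1500) == 1526);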
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
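
/* Illustrative arithmetic (assuming RTE_MEMPOOL_CACHE_MAX_SIZE == 512):
 * MAX_NB_MBUF / MIN_NB_MBUF == 16, so the halving sequence bottoms out at
 * 262144 / 16 == 16384 mbufs, and 16384 % 512 == 0 satisfies both checks
 * above. */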
/*
 * DPDK XSTATS Counter names definition
 */
#define XSTAT_RX_64_PACKETS              "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS       "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS      "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS      "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS     "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS    "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS     "rx_size_1523_to_max_packets"

#define XSTAT_TX_64_PACKETS              "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS       "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS      "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS      "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS     "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS    "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS     "tx_size_1523_to_max_packets"

#define XSTAT_TX_MULTICAST_PACKETS       "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS       "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS       "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS       "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS         "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS       "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS           "rx_jabber_errors"
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

#define OVS_VHOST_MAX_QUEUE_NUM 1024     /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */
static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */
/*
 * Maximum amount of time in micro seconds to try and enqueue to vhost.
 */
#define VHOST_ENQ_RETRY_USECS 100
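
/* Sketch (mirrors the vhost send path further below): the retry budget is
 * converted from microseconds to TSC cycles before the busy-wait loop:
 *
 *     uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
 */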
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
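
/* Scale note (illustrative): on a 2 GHz TSC, DRAIN_TSC's 200,000 cycles
 * correspond to roughly 100 microseconds between forced queue drains; the
 * real interval scales with the CPU clock. */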
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Quality of Service */

/* An instance of a QoS configuration.  Always associated with a particular
 * netdev.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct the QoS implementation on 'netdev'. The
     * implementation should make the appropriate calls to configure QoS
     * according to 'details'. The implementation may assume that any current
     * QoS configuration already installed should be destroyed before
     * constructing the new configuration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets 'netdev->qos_conf'
     * to an initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_construct)(struct netdev *netdev, const struct smap *details);

    /* Destroys the data structures allocated by the implementation as part of
     * 'qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);

    /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)). */
    int (*qos_get)(const struct netdev *netdev, struct smap *details);

    /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
     * required calls to complete the reconfiguration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function may be null if 'qos_conf' is not configurable. */
    int (*qos_set)(struct netdev *netdev, const struct smap *details);

    /* Modify an array of rte_mbufs. The modification is specific to
     * each qos implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
                   int pkt_cnt);
};
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};
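
/* Minimal sketch of a 'dpdk_qos_ops' instance (illustrative only; the
 * 'noop_*' names are hypothetical and not part of this file).  'qos_run'
 * must return the number of mbufs left in 'pkts' after policing, so a
 * pass-through policer returns 'pkt_cnt' unchanged (construct/destruct/get
 * hooks omitted for brevity):
 *
 *     static int
 *     noop_qos_run(struct netdev *netdev, struct rte_mbuf **pkts,
 *                  int pkt_cnt)
 *     {
 *         return pkt_cnt;
 *     }
 *
 *     static const struct dpdk_qos_ops noop_qos_ops = {
 *         .qos_name = "noop",
 *         .qos_run  = noop_qos_run,
 *     };
 */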
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);
/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time
                                    * pkts are queued. */
    int count;                     /* Number of buffered packets. */
    uint64_t tsc;                  /* Timestamp of the last flush. */
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    int map;                       /* Mapping of configured vhost-user queues
                                    * to those enabled by the guest. */
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* dpdk has no way to remove dpdk ring ethernet devices
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;           /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has. We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission */
    int real_n_txq;
    int real_n_rxq;
    bool txq_needs_locking;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    struct qos_conf *qos_conf;
    rte_spinlock_t qos_lock;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                     NETDEV_DPDK_MBUF_ALIGN);
}
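
/* Worked example (illustrative; assumes DPDK's default RTE_PKTMBUF_HEADROOM
 * of 128 bytes): dpdk_buf_size(1500) computes 1526 + 128 = 1654 bytes and
 * rounds up to the next 1 KB boundary, i.e. a 2048-byte buffer. */
BUILD_ASSERT_DECL(ROUND_UP(MTU_TO_MAX_FRAME_LEN(1500) + 128,
                           NETDEV_DPDK_MBUF_ALIGN) == 2048);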
/* XXX: use dpdk malloc for entire OVS. in fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;
    struct rte_pktmbuf_pool_private mbp_priv;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);

    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    } else {
        VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);
    }

    ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned)dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV):  rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request fewer queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
        if (diag) {
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with less tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with less rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->real_n_txq = n_txq;

        return 0;
    }

    return diag;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    if (!rte_eal_init_ret) { /* Only after successful initialization */
        dev = dpdk_rte_mzalloc(sizeof *dev);
        return &dev->up;
    }
    return NULL;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *dev, unsigned int n_txqs)
{
    unsigned i;

    dev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *dev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!dev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core.  If the corresponding core
             * is not on the same numa node as 'dev', flags the
             * 'flush_tx'. */
            dev->tx_q[i].flush_tx = dev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs. Always flush */
            dev->tx_q[i].flush_tx = true;
        }

        /* Initialize map for vhost devices. */
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
        rte_spinlock_init(&dev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int sid;
    int err = 0;
    uint32_t buf_size;

    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->port_id = port_no;
    dev->type = type;
    dev->flags = 0;
    dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    buf_size = dpdk_buf_size(dev->mtu);
    dev->dpdk_mp = dpdk_mp_get(dev->socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!dev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    /* Initialise QoS configuration to NULL and qos lock to unlocked */
    dev->qos_conf = NULL;
    rte_spinlock_init(&dev->qos_lock);

    netdev->n_txq = NR_QUEUE;
    netdev->n_rxq = NR_QUEUE;
    netdev->requested_n_rxq = NR_QUEUE;
    dev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(dev, NR_QUEUE);
        err = dpdk_eth_dev_init(dev);
        if (err) {
            goto unlock;
        }
    } else {
        netdev_dpdk_alloc_txq(dev, OVS_VHOST_MAX_QUEUE_NUM);
    }

    ovs_list_push_back(&dpdk_list, &dev->list_node);

unlock:
    if (err) {
        rte_free(dev->tx_q);
    }
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
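
/* Usage sketch (illustrative): given a netdev named "dpdk7",
 * dpdk_dev_parse_name("dpdk7", "dpdk", &port_no) returns 0 and sets
 * 'port_no' to 7, while names such as "eth7" or "dpdk-7" make it
 * return ENODEV. */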
static int
vhost_construct_helper(struct netdev *netdev) OVS_REQUIRES(dpdk_mutex)
{
    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    return netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
}
static int
netdev_dpdk_vhost_cuse_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    strncpy(dev->vhost_id, netdev->name, sizeof(dev->vhost_id));
    err = vhost_construct_helper(netdev);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(dev->vhost_id, sizeof(dev->vhost_id), "%s/%s",
             vhost_sock_dir, name);

    err = rte_vhost_driver_register(dev->vhost_id);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 dev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
        err = vhost_construct_helper(netdev);
    }

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.",
                 dev->vhost_id);
    }

    if (rte_vhost_driver_unregister(dev->vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
    } else {
        fatal_signal_remove_file_to_unlink(dev->vhost_id);
    }

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d",
                    netdev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
                                               netdev->requested_n_rxq), 1);
    netdev_change_seq_changed(netdev);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}
/* Sets the number of tx queues and rx queues for the dpdk interface.
 * If the configuration fails, do not try restoring its old configuration
 * and just return the error. */
static int
netdev_dpdk_set_multiq(struct netdev *netdev, unsigned int n_txq,
                       unsigned int n_rxq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;
    int old_rxq, old_txq;

    if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_eth_dev_stop(dev->port_id);

    old_txq = netdev->n_txq;
    old_rxq = netdev->n_rxq;
    netdev->n_txq = n_txq;
    netdev->n_rxq = n_rxq;

    rte_free(dev->tx_q);
    err = dpdk_eth_dev_init(dev);
    netdev_dpdk_alloc_txq(dev, dev->real_n_txq);
    if (err) {
        /* If there has been an error, it means that the requested queues
         * have not been created.  Restore the old numbers. */
        netdev->n_txq = old_txq;
        netdev->n_rxq = old_rxq;
    }

    dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev, unsigned int n_txq,
                                  unsigned int n_rxq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    netdev->n_txq = n_txq;
    dev->real_n_txq = 1;
    netdev->n_rxq = 1;
    dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_set_multiq(struct netdev *netdev, unsigned int n_txq,
                             unsigned int n_rxq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    netdev->n_txq = n_txq;
    netdev->n_rxq = n_rxq;

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}
static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}
static bool
is_vhost_running(struct virtio_net *virtio_dev)
{
    return (virtio_dev != NULL && (virtio_dev->flags & VIRTIO_DEV_RUNNING));
}
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count)
{
    int i;
    unsigned int packet_size;
    struct dp_packet *packet;

    stats->rx_packets += count;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet **packets, int *c)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
    int qid = rxq->queue_id;
    uint16_t nb_rx = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
        return EAGAIN;
    }

    if (rxq->queue_id >= dev->real_n_rxq) {
        return EOPNOTSUPP;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, packets, nb_rx);
    rte_spinlock_unlock(&dev->stats_lock);

    *c = (int) nb_rx;
    return 0;
}
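
/* Virtqueue numbering sketch: with the vhost library used here, queue pair
 * 'q' owns virtqueues 'q * VIRTIO_QNUM + VIRTIO_RXQ' and
 * 'q * VIRTIO_QNUM + VIRTIO_TXQ', so the host dequeues what the guest
 * transmitted from the pair's TXQ (as above) and enqueues towards the guest
 * on the pair's RXQ (as in the send path below). */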
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    int nb_rx;

    /* There is only one tx queue for this core.  Do not flush other
     * queues.
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed */
    if (rxq->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    *c = nb_rx;

    return 0;
}
static inline int
netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                      int cnt)
{
    struct netdev *netdev = &dev->up;

    if (dev->qos_conf != NULL) {
        rte_spinlock_lock(&dev->qos_lock);
        /* Re-check under the lock: another thread may have cleared
         * 'qos_conf' between the first test and acquiring 'qos_lock'. */
        if (dev->qos_conf != NULL) {
            cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
        }
        rte_spinlock_unlock(&dev->qos_lock);
    }

    return cnt;
}
static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt,
                         bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int qos_pkts = cnt;
    uint64_t start = 0;
    int i;

    qid = dev->tx_q[qid % dev->real_n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid < 0)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

    /* Check whether QoS has been configured for the netdev. */
    cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);
    qos_pkts -= cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
                                          cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible next iteration.*/
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            uint64_t timeout = VHOST_ENQ_RETRY_USECS
                               * rte_get_timer_hz() / 1E6;
            unsigned int expired = 0;

            if (!start) {
                start = rte_get_timer_cycles();
            }

            /*
             * Unable to enqueue packets to vhost interface.
             * Check available entries before retrying.
             */
            while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
                if (OVS_UNLIKELY((rte_get_timer_cycles() - start)
                                 > timeout)) {
                    expired = 1;
                    break;
                }
            }
            if (expired) {
                /* break out of main loop. */
                break;
            }
        }
    } while (cnt);

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&dev->stats_lock);
    cnt += qos_pkts;
    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts, cnt);
    rte_spinlock_unlock(&dev->stats_lock);

out:
    if (may_steal) {
        for (i = 0; i < total_pkts; i++) {
            dp_packet_delete(pkts[i]);
        }
    }
}
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
1441 dpdk_do_tx_copy(struct netdev
*netdev
, int qid
, struct dp_packet
**pkts
,
1443 OVS_NO_THREAD_SAFETY_ANALYSIS
1445 #if !defined(__CHECKER__) && !defined(_WIN32)
1446 const size_t PKT_ARRAY_SIZE
= cnt
;
1448 /* Sparse or MSVC doesn't like variable length array. */
1449 enum { PKT_ARRAY_SIZE
= NETDEV_MAX_BURST
};
1451 struct netdev_dpdk
*dev
= netdev_dpdk_cast(netdev
);
1452 struct rte_mbuf
*mbufs
[PKT_ARRAY_SIZE
];
1457 /* If we are on a non pmd thread we have to use the mempool mutex, because
1458 * every non pmd thread shares the same mempool cache */
1460 if (!dpdk_thread_is_pmd()) {
1461 ovs_mutex_lock(&nonpmd_mempool_mutex
);
1464 for (i
= 0; i
< cnt
; i
++) {
1465 int size
= dp_packet_size(pkts
[i
]);
1467 if (OVS_UNLIKELY(size
> dev
->max_packet_len
)) {
1468 VLOG_WARN_RL(&rl
, "Too big size %d max_packet_len %d",
1469 (int)size
, dev
->max_packet_len
);
1475 mbufs
[newcnt
] = rte_pktmbuf_alloc(dev
->dpdk_mp
->mp
);
1477 if (!mbufs
[newcnt
]) {
1482 /* We have to do a copy for now */
1483 memcpy(rte_pktmbuf_mtod(mbufs
[newcnt
], void *), dp_packet_data(pkts
[i
]), size
);
1485 rte_pktmbuf_data_len(mbufs
[newcnt
]) = size
;
1486 rte_pktmbuf_pkt_len(mbufs
[newcnt
]) = size
;
1491 if (dev
->type
== DPDK_DEV_VHOST
) {
1492 __netdev_dpdk_vhost_send(netdev
, qid
, (struct dp_packet
**) mbufs
, newcnt
, true);
1494 unsigned int qos_pkts
= newcnt
;
1496 /* Check if QoS has been configured for this netdev. */
1497 newcnt
= netdev_dpdk_qos_run__(dev
, mbufs
, newcnt
);
1499 dropped
+= qos_pkts
- newcnt
;
1500 dpdk_queue_pkts(dev
, qid
, mbufs
, newcnt
);
1501 dpdk_queue_flush(dev
, qid
);
1504 if (OVS_UNLIKELY(dropped
)) {
1505 rte_spinlock_lock(&dev
->stats_lock
);
1506 dev
->stats
.tx_dropped
+= dropped
;
1507 rte_spinlock_unlock(&dev
->stats_lock
);
1510 if (!dpdk_thread_is_pmd()) {
1511 ovs_mutex_unlock(&nonpmd_mempool_mutex
);
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                       struct dp_packet **pkts, int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;
        unsigned int qos_pkts = 0;
        unsigned int temp_cnt = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    temp_cnt = i - next_tx_idx;
                    qos_pkts = temp_cnt;

                    temp_cnt = netdev_dpdk_qos_run__(dev,
                                                     (struct rte_mbuf **) pkts,
                                                     temp_cnt);
                    dropped += qos_pkts - temp_cnt;
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **) &pkts[next_tx_idx],
                                    temp_cnt);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int)size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }

        if (next_tx_idx != cnt) {
            cnt -= next_tx_idx;
            qos_pkts = cnt;

            cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf **) pkts, cnt);
            dropped += qos_pkts - cnt;
            dpdk_queue_pkts(dev, qid,
                            (struct rte_mbuf **) &pkts[next_tx_idx],
                            cnt);
        }

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err, dpdk_mtu;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;
    uint32_t buf_size;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    buf_size = dpdk_buf_size(mtu);
    dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);

    mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstats *xstats,
                           const unsigned int size)
{
    /* XXX Current implementation is simple search through an array
     * to find hardcoded counter names. In future DPDK release (TBD)
     * XSTATS API will change so each counter will be represented by
     * unique ID instead of String. */

    for (unsigned int i = 0; i < size; i++) {
        if (strcmp(XSTAT_RX_64_PACKETS, xstats[i].name) == 0) {
            stats->rx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, xstats[i].name) == 0) {
            stats->rx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, xstats[i].name) == 0) {
            stats->rx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, xstats[i].name) == 0) {
            stats->rx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_64_PACKETS, xstats[i].name) == 0) {
            stats->tx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, xstats[i].name) == 0) {
            stats->tx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, xstats[i].name) == 0) {
            stats->tx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, xstats[i].name) == 0) {
            stats->tx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, xstats[i].name) == 0) {
            stats->tx_multicast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, xstats[i].name) == 0) {
            stats->rx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, xstats[i].name) == 0) {
            stats->tx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, xstats[i].name) == 0) {
            stats->rx_undersized_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, xstats[i].name) == 0) {
            stats->rx_fragmented_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, xstats[i].name) == 0) {
            stats->rx_jabber_errors = xstats[i].value;
        }
    }
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstats *rte_xstats;
    int rte_xstats_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    rte_xstats_len = rte_eth_xstats_get(dev->port_id, NULL, 0);
    if (rte_xstats_len > 0) {
        rte_xstats = dpdk_rte_mzalloc(sizeof(*rte_xstats) * rte_xstats_len);
        memset(rte_xstats, 0xff, sizeof(*rte_xstats) * rte_xstats_len);
        rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                            rte_xstats_len);
        if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
            netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_ret);
        }
        rte_free(rte_xstats);
    } else {
        VLOG_WARN("Can't get XSTATS counters for port: %i.", dev->port_id);
    }

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
    }

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%u",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }

    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *virtio_dev)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < virtio_dev->virt_qp_nb; i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_TXQ, 0);
    }
}
/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and real_n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->real_n_txq;

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    }

    rte_free(enabled_queues);
}
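
/* Worked example (illustrative): with real_n_txq == 4 and only queues 0 and
 * 2 enabled by the guest, the loops above yield the mapping
 *
 *     0 --> 0,  1 --> 0,  2 --> 2,  3 --> 2
 *
 * i.e. traffic for the disabled queues 1 and 3 is round-robined onto the
 * enabled ones. */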
static int
netdev_dpdk_vhost_set_queues(struct netdev_dpdk *dev,
                             struct virtio_net *virtio_dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t qp_num;

    qp_num = virtio_dev->virt_qp_nb;
    if (qp_num > dev->up.n_rxq) {
        VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
                 "too many queues %d > %d", virtio_dev->ifname,
                 virtio_dev->device_fh, qp_num, dev->up.n_rxq);
        return -1;
    }

    dev->real_n_rxq = qp_num;
    dev->real_n_txq = qp_num;
    dev->txq_needs_locking = true;
    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    netdev_dpdk_remap_txqs(dev);

    return 0;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *virtio_dev)
{
    struct netdev_dpdk *dev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(dev, list_node, &dpdk_list) {
        if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&dev->mutex);
            if (netdev_dpdk_vhost_set_queues(dev, virtio_dev)) {
                ovs_mutex_unlock(&dev->mutex);
                ovs_mutex_unlock(&dpdk_mutex);
                return -1;
            }
            ovsrcu_set(&dev->virtio_dev, virtio_dev);
            exists = true;
            virtio_dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(virtio_dev);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
                  "found", virtio_dev->ifname, virtio_dev->device_fh);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been added",
              virtio_dev->ifname, virtio_dev->device_fh);
    return 0;
}
2250 netdev_dpdk_txq_map_clear(struct netdev_dpdk
*dev
)
2251 OVS_REQUIRES(dev
->mutex
)
2255 for (i
= 0; i
< dev
->real_n_txq
; i
++) {
2256 dev
->tx_q
[i
].map
= OVS_VHOST_QUEUE_MAP_UNKNOWN
;
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *virtio_dev)
{
    struct netdev_dpdk *dev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(dev) == virtio_dev) {

            ovs_mutex_lock(&dev->mutex);
            virtio_dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&dev->virtio_dev, NULL);
            netdev_dpdk_txq_map_clear(dev);
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists == true) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed",
                  virtio_dev->ifname, virtio_dev->device_fh);
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
                  virtio_dev->device_fh);
    }
}
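/*
 * For context (a sketch, not code from this file): readers obtain the device
 * pointer RCU-style and must tolerate it becoming NULL, e.g. in the rx path:
 *
 *     struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
 *     if (!virtio_dev) {
 *         return EAGAIN;   // device is gone or not yet attached
 *     }
 *     // ... virtio_dev stays valid until the next quiescent point ...
 *
 * The ovsrcu_synchronize() call above therefore guarantees that no reader
 * still holds the pointer by the time destroy_device() returns.
 */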
static int
vring_state_changed(struct virtio_net *virtio_dev, uint16_t queue_id,
                    int enable)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&dev->mutex);
            if (enable) {
                dev->tx_q[qid].map = qid;
            } else {
                dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
            }
            netdev_dpdk_remap_txqs(dev);
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' %"
                  PRIu64" changed to \'%s\'", queue_id, qid,
                  virtio_dev->ifname, virtio_dev->device_fh,
                  (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
                  virtio_dev->device_fh);
        return -1;
    }

    return 0;
}
static struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};
static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the vhost thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}
static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
                            | 1ULL << VIRTIO_NET_F_HOST_TSO6
                            | 1ULL << VIRTIO_NET_F_CSUM);

    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
    return 0;
}
static int
dpdk_vhost_cuse_class_init(void)
{
    return 0;
}

static int
dpdk_vhost_user_class_init(void)
{
    return 0;
}
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);
}
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[RTE_RING_NAMESIZE];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr". */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* Really all that is needed. */
            return 0;
        }
    }
    /* Need to create the device rings. */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
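/*
 * Example (illustrative only): opening a port named "dpdkr0" parses to
 * port_no 0, and dpdk_ring_create() names its rings "dpdkr0_tx" and
 * "dpdkr0_rx".  A secondary DPDK process acting as the ring client would
 * attach to them by name, e.g.:
 *
 *     struct rte_ring *tx = rte_ring_lookup("dpdkr0_tx");
 *     struct rte_ring *rx = rte_ring_lookup("dpdkr0_rx");
 */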
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < cnt; i++) {
        dp_packet_rss_invalidate(pkts[i]);
    }

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
}
/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations qos_name to name.  Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match is found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }
    return NULL;
}
/*
 * Call qos_destruct to clean up items associated with the netdev's
 * qos_conf.  Set the netdev's qos_conf to NULL.
 */
static void
qos_delete_conf(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_spinlock_lock(&dev->qos_lock);
    if (dev->qos_conf) {
        if (dev->qos_conf->ops->qos_destruct) {
            dev->qos_conf->ops->qos_destruct(netdev, dev->qos_conf);
        }
        dev->qos_conf = NULL;
    }
    rte_spinlock_unlock(&dev->qos_lock);
}
static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }
    return 0;
}
static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    if (dev->qos_conf) {
        *typep = dev->qos_conf->ops->qos_name;
        error = (dev->qos_conf->ops->qos_get
                 ? dev->qos_conf->ops->qos_get(netdev, details) : 0);
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_set_qos(struct netdev *netdev,
                    const char *type, const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    int error = 0;

    /* If type is empty or unsupported then the current QoS configuration
     * for the dpdk-netdev can be destroyed. */
    new_ops = qos_lookup_name(type);

    if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
        qos_delete_conf(netdev);
        return EOPNOTSUPP;
    }

    ovs_mutex_lock(&dev->mutex);

    if (dev->qos_conf) {
        if (new_ops == dev->qos_conf->ops) {
            error = new_ops->qos_set ? new_ops->qos_set(netdev, details) : 0;
        } else {
            /* Delete existing QoS configuration. */
            qos_delete_conf(netdev);
            ovs_assert(dev->qos_conf == NULL);

            /* Install new QoS configuration. */
            error = new_ops->qos_construct(netdev, details);
            ovs_assert((error == 0) == (dev->qos_conf != NULL));
        }
    } else {
        error = new_ops->qos_construct(netdev, details);
        ovs_assert((error == 0) == (dev->qos_conf != NULL));
    }

    ovs_mutex_unlock(&dev->mutex);
    return error;
}
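/*
 * Example (illustrative; see the DPDK install guide for the authoritative
 * syntax): a QoS of type "egress-policer" is normally configured through the
 * database, e.g.:
 *
 *     ovs-vsctl set port vhost-user0 qos=@newqos -- \
 *         --id=@newqos create qos type=egress-policer \
 *         other_config:cir=46000000 other_config:cbs=2048
 *
 * which reaches netdev_dpdk_set_qos() with type "egress-policer" and the
 * cir/cbs strings in 'details'.
 */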
/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};
static struct egress_policer *
egress_policer_get__(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    return CONTAINER_OF(dev->qos_conf, struct egress_policer, qos_conf);
}
static int
egress_policer_qos_construct(struct netdev *netdev,
                             const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    rte_spinlock_lock(&dev->qos_lock);
    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    dev->qos_conf = &policer->qos_conf;
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    rte_spinlock_unlock(&dev->qos_lock);

    return err;
}
static void
egress_policer_qos_destruct(struct netdev *netdev OVS_UNUSED,
                            struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}
static int
egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
{
    struct egress_policer *policer = egress_policer_get__(netdev);

    smap_add_format(details, "cir", "%llu",
                    1ULL * policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%llu",
                    1ULL * policer->app_srtcm_params.cbs);
    return 0;
}
static int
egress_policer_qos_set(struct netdev *netdev, const struct smap *details)
{
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    policer = egress_policer_get__(netdev);
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);

    return err;
}
static inline bool
egress_policer_pkt_handle__(struct rte_meter_srtcm *meter,
                            struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}
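/*
 * Note: the meter runs in color-blind mode, i.e. every packet is checked
 * against the single rate Three Color Marker (srTCM, RFC 2697) with no
 * pre-existing color; packets marked GREEN are forwarded and everything
 * else is dropped by egress_policer_run() below.  Per the rte_meter API,
 * 'cir' is in bytes per second and 'cbs' in bytes.
 */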
static int
egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts,
                   int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct egress_policer *policer = egress_policer_get__(netdev);
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet. */
        if (egress_policer_pkt_handle__(&policer->egress_meter, pkt,
                                        current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}
static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_set
};
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
    GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV)          \
{                                                             \
    NAME,                                                     \
    true,                       /* is_pmd */                  \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    netdev_dpdk_set_config,                                   \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    MULTIQ,                     /* set_multiq */              \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    NULL,                       /* set_policing */            \
    netdev_dpdk_get_qos_types,                                \
    NULL,                       /* get_qos_capabilities */    \
    netdev_dpdk_get_qos,                                      \
    netdev_dpdk_set_qos,                                      \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_addr_list */           \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
    NULL,                       /* reconfigure */             \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
static bool
process_vhost_flags(char *flag, char *default_val, int size,
                    const struct smap *ovs_other_config,
                    char **new_val)
{
    const char *val;
    int changed = 0;

    val = smap_get(ovs_other_config, flag);

    /* Depending on which version of vhost is in use, process the
     * vhost-specific flag if it is provided; otherwise fall back to the
     * default value. */
    if (val && (strlen(val) <= size)) {
        changed = 1;
        *new_val = xstrdup(val);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
static char **
grow_argv(char ***argv, size_t cur_siz, size_t grow_by)
{
    return xrealloc(*argv, sizeof(char *) * (cur_siz + grow_by));
}
static void
dpdk_option_extend(char ***argv, int argc, const char *option,
                   const char *value)
{
    char **newargv = grow_argv(argv, argc, 2);
    *argv = newargv;
    newargv[argc] = xstrdup(option);
    newargv[argc+1] = xstrdup(value);
}
static char **
move_argv(char ***argv, size_t cur_size, char **src_argv, size_t src_argc)
{
    char **newargv = grow_argv(argv, cur_size, src_argc);
    while (src_argc--) {
        newargv[cur_size+src_argc] = src_argv[src_argc];
        src_argv[src_argc] = NULL;
    }
    return newargv;
}
static int
extra_dpdk_args(const char *ovs_extra_config, char ***argv, int argc)
{
    int ret = argc;
    char *release_tok = xstrdup(ovs_extra_config);
    char *tok = release_tok, *endptr = NULL;

    for (tok = strtok_r(release_tok, " ", &endptr); tok != NULL;
         tok = strtok_r(NULL, " ", &endptr)) {
        char **newarg = grow_argv(argv, ret, 1);
        *argv = newarg;
        newarg[ret++] = xstrdup(tok);
    }

    free(release_tok);
    return ret;
}
static bool
argv_contains(char **argv_haystack, const size_t argc_haystack,
              const char *needle)
{
    for (size_t i = 0; i < argc_haystack; ++i) {
        if (!strcmp(argv_haystack[i], needle)) {
            return true;
        }
    }
    return false;
}
static int
construct_dpdk_options(const struct smap *ovs_other_config,
                       char ***argv, const int initial_size,
                       char **extra_args, const size_t extra_argc)
{
    struct dpdk_options_map {
        const char *ovs_configuration;
        const char *dpdk_option;
        bool default_enabled;
        const char *default_value;
    } opts[] = {
        {"dpdk-lcore-mask", "-c", false, NULL},
        {"dpdk-hugepage-dir", "--huge-dir", false, NULL},
    };

    int i, ret = initial_size;

    /* First, construct from the flat options (non-mutually-exclusive). */
    for (i = 0; i < ARRAY_SIZE(opts); ++i) {
        const char *lookup = smap_get(ovs_other_config,
                                      opts[i].ovs_configuration);
        if (!lookup && opts[i].default_enabled) {
            lookup = opts[i].default_value;
        }

        if (lookup) {
            if (!argv_contains(extra_args, extra_argc, opts[i].dpdk_option)) {
                dpdk_option_extend(argv, ret, opts[i].dpdk_option, lookup);
                ret += 2;
            } else {
                VLOG_WARN("Ignoring database defined option '%s' due to "
                          "dpdk_extras config", opts[i].dpdk_option);
            }
        }
    }

    return ret;
}
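/*
 * Example (illustrative only): with other_config:dpdk-lcore-mask=0x2 and
 * other_config:dpdk-hugepage-dir=/dev/hugepages in the Open_vSwitch table,
 * the loop above appends "-c 0x2 --huge-dir /dev/hugepages" to the EAL argv,
 * unless the corresponding EAL flag was already given via dpdk-extra.
 */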
#define MAX_DPDK_EXCL_OPTS 10

static int
construct_dpdk_mutex_options(const struct smap *ovs_other_config,
                             char ***argv, const int initial_size,
                             char **extra_args, const size_t extra_argc)
{
    struct dpdk_exclusive_options_map {
        const char *category;
        const char *ovs_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *eal_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *default_value;
        int default_option;
    } excl_opts[] = {
        {"memory type",
         {"dpdk-alloc-mem", "dpdk-socket-mem", NULL,},
         {"-m",             "--socket-mem",    NULL,},
         "1024,0", 1
        },
    };

    int i, ret = initial_size;
    for (i = 0; i < ARRAY_SIZE(excl_opts); ++i) {
        int found_opts = 0, scan, found_pos = -1;
        const char *found_value;
        struct dpdk_exclusive_options_map *popt = &excl_opts[i];

        for (scan = 0; scan < MAX_DPDK_EXCL_OPTS
                 && popt->ovs_dpdk_options[scan]; ++scan) {
            const char *lookup = smap_get(ovs_other_config,
                                          popt->ovs_dpdk_options[scan]);
            if (lookup && strlen(lookup)) {
                found_opts++;
                found_pos = scan;
                found_value = lookup;
            }
        }

        if (!found_opts) {
            if (popt->default_option) {
                found_pos = popt->default_option;
                found_value = popt->default_value;
            } else {
                continue;
            }
        }

        if (found_opts > 1) {
            VLOG_ERR("Multiple defined options for %s. Please check your"
                     " database settings and reconfigure if necessary.",
                     popt->category);
        }

        if (!argv_contains(extra_args, extra_argc,
                           popt->eal_dpdk_options[found_pos])) {
            dpdk_option_extend(argv, ret, popt->eal_dpdk_options[found_pos],
                               found_value);
            ret += 2;
        } else {
            VLOG_WARN("Ignoring database defined option '%s' due to "
                      "dpdk_extras config", popt->eal_dpdk_options[found_pos]);
        }
    }

    return ret;
}
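/*
 * Example (illustrative only): other_config:dpdk-socket-mem=1024,1024 maps
 * to "--socket-mem 1024,1024", while other_config:dpdk-alloc-mem=2048 maps
 * to "-m 2048".  Setting both triggers the "Multiple defined options" error
 * above; setting neither falls back to the default "--socket-mem 1024,0".
 */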
static int
get_dpdk_args(const struct smap *ovs_other_config, char ***argv,
              int argc)
{
    const char *extra_configuration;
    char **extra_args = NULL;
    int i;
    size_t extra_argc = 0;

    extra_configuration = smap_get(ovs_other_config, "dpdk-extra");
    if (extra_configuration) {
        extra_argc = extra_dpdk_args(extra_configuration, &extra_args, 0);
    }

    i = construct_dpdk_options(ovs_other_config, argv, argc, extra_args,
                               extra_argc);
    i = construct_dpdk_mutex_options(ovs_other_config, argv, i, extra_args,
                                     extra_argc);

    if (extra_configuration) {
        *argv = move_argv(argv, i, extra_args, extra_argc);
    }

    return i + extra_argc;
}
static char **dpdk_argv;
static int dpdk_argc;

static void
deferred_argv_release(void)
{
    int result;

    for (result = 0; result < dpdk_argc; ++result) {
        free(dpdk_argv[result]);
    }

    free(dpdk_argv);
}
static void
dpdk_init__(const struct smap *ovs_other_config)
{
    int i;
    char **argv = NULL;
    int result;
    int argc, argc_tmp;
    bool auto_determine = true;
    int err = 0;
    cpu_set_t cpuset;
#ifdef VHOST_CUSE
    char *cuse_dev_name;
#endif
    char *sock_dir_subcomponent;

    if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
        VLOG_INFO("DPDK Disabled - to change this requires a restart.\n");
        return;
    }

    VLOG_INFO("DPDK Enabled, initializing");

#ifdef VHOST_CUSE
    if (process_vhost_flags("cuse-dev-name", xstrdup("vhost-net"),
                            PATH_MAX, ovs_other_config, &cuse_dev_name)) {
    }
#else
    if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
                            NAME_MAX, ovs_other_config,
                            &sock_dir_subcomponent)) {
        struct stat s;
        if (!strstr(sock_dir_subcomponent, "..")) {
            vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
                                       sock_dir_subcomponent);

            err = stat(vhost_sock_dir, &s);
            if (err) {
                VLOG_ERR("vhost-user sock directory '%s' does not exist.",
                         vhost_sock_dir);
            }
        } else {
            vhost_sock_dir = xstrdup(ovs_rundir());
            VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid "
                     "characters '..' - using %s instead.",
                     ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
        }
        free(sock_dir_subcomponent);
    } else {
        vhost_sock_dir = sock_dir_subcomponent;
    }
#endif

    argv = grow_argv(&argv, 0, 1);
    argc = 1;
    argv[0] = xstrdup(ovs_get_program_name());
    argc_tmp = get_dpdk_args(ovs_other_config, &argv, argc);

    while (argc_tmp != argc) {
        if (!strcmp("-c", argv[argc]) || !strcmp("-l", argv[argc])) {
            auto_determine = false;
        }
        argc++;
    }
    argc = argc_tmp;

    /*
     * NOTE: This is an unsophisticated mechanism for determining the DPDK
     * lcore for the DPDK Master.
     */
    if (auto_determine) {
        /* Get the main thread affinity. */
        CPU_ZERO(&cpuset);
        err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (!err) {
            for (i = 0; i < CPU_SETSIZE; i++) {
                if (CPU_ISSET(i, &cpuset)) {
                    argv = grow_argv(&argv, argc, 2);
                    argv[argc++] = xstrdup("-c");
                    argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
                    i = CPU_SETSIZE;
                }
            }
        } else {
            VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
            /* User did not set dpdk-lcore-mask and unable to get current
             * thread affinity - default to core 0x1. */
            argv = grow_argv(&argv, argc, 2);
            argv[argc++] = xstrdup("-c");
            argv[argc++] = xasprintf("0x%X", 1);
        }
    }

    argv = grow_argv(&argv, argc, 1);
    argv[argc] = NULL;

    optind = 1;

    if (VLOG_IS_INFO_ENABLED()) {
        struct ds eal_args;
        int opt;

        ds_init(&eal_args);
        ds_put_cstr(&eal_args, "EAL ARGS:");
        for (opt = 0; opt < argc; ++opt) {
            ds_put_cstr(&eal_args, " ");
            ds_put_cstr(&eal_args, argv[opt]);
        }
        VLOG_INFO("%s", ds_cstr_ro(&eal_args));
        ds_destroy(&eal_args);
    }

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    /* Set the main thread affinity back to pre rte_eal_init() value. */
    if (auto_determine && !err) {
        err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (err) {
            VLOG_ERR("Thread setaffinity error %d", err);
        }
    }

    dpdk_argv = argv;
    dpdk_argc = argc;

    atexit(deferred_argv_release);

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    /* We are called from the main thread here. */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

#ifdef VHOST_CUSE
    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified, cuse_dev_name is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);
    free(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return;
    }
#endif

    dpdk_vhost_class_init();

    /* Finally, register the dpdk classes. */
    netdev_dpdk_register();
}
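/*
 * Example (illustrative only): with other_config:dpdk-init=true and nothing
 * else set, the EAL line logged above typically looks like
 *
 *     EAL ARGS: ovs-vswitchd --socket-mem 1024,0 -c 0x00000001
 *
 * where "--socket-mem" comes from the default in
 * construct_dpdk_mutex_options() and "-c" from the auto-determined affinity
 * of the main thread.
 */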
void
dpdk_init(const struct smap *ovs_other_config)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovs_other_config && ovsthread_once_start(&once)) {
        dpdk_init__(ovs_other_config);
        ovsthread_once_done(&once);
    }
}
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_cuse_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_set_multiq,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    dpdk_common_init();
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
    netdev_register_provider(&dpdk_vhost_cuse_class);
#else
    netdev_register_provider(&dpdk_vhost_user_class);
#endif
}
int
pmd_thread_setaffinity_cpu(unsigned cpu)
{
    cpu_set_t cpuset;
    int err;

    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
    if (err) {
        VLOG_ERR("Thread affinity error %d", err);
        return err;
    }
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;

    return 0;
}
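/*
 * Usage note (sketch): pmd threads in dpif-netdev call this once at startup
 * so that rte_lcore_id() subsequently reports 'cpu', which lets DPDK's
 * per-lcore facilities (e.g. mempool caches) work correctly from that
 * thread.
 */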
bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}