/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "openvswitch/shash.h"
#include "unaligned.h"

#include "rte_config.h"
#include "rte_meter.h"
#include "rte_pdump.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (ETHER_HDR_LEN + ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len)                    \
                                     - ETHER_HDR_LEN - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              (MTU_TO_MAX_FRAME_LEN(mtu)      \
                                     + sizeof(struct dp_packet)    \
                                     + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN      1024
#define NETDEV_DPDK_MAX_PKT_LEN     9728
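/* Worked example of the macros above, for the standard Ethernet MTU of 1500
 * (assuming the usual ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4 and
 * VLAN_HEADER_LEN = 4): MTU_TO_FRAME_LEN(1500) = 1500 + 14 + 4 = 1518,
 * ETHER_HDR_MAX_LEN = 14 + 4 + 2 * 4 = 26, and
 * MTU_TO_MAX_FRAME_LEN(1500) = 1526.  MBUF_SIZE then adds the dp_packet
 * metadata and RTE_PKTMBUF_HEADROOM on top of that maximum frame length. */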
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */

#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
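/* For illustration: MAX_NB_MBUF is 4096 * 64 = 262144 mbufs, so the
 * allocation attempts walk 262144 -> 131072 -> 65536 -> 32768 -> 16384
 * (MIN_NB_MBUF), each step a power-of-two division of the first, which is
 * exactly what the two asserts above guarantee. */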
/*
 * DPDK XSTATS Counter names definition
 */
#define XSTAT_RX_64_PACKETS              "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS       "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS      "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS      "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS     "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS    "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS     "rx_size_1523_to_max_packets"

#define XSTAT_TX_64_PACKETS              "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS       "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS      "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS      "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS     "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS    "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS     "tx_size_1523_to_max_packets"

#define XSTAT_TX_MULTICAST_PACKETS       "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS       "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS       "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS       "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS         "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS       "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS           "rx_jabber_errors"
/* Size of Physical NIC RX Queue, Max (n + 32 <= 4096) */
#define NIC_PORT_RX_Q_SIZE 2048
/* Size of Physical NIC TX Queue, Max (n + 32 <= 4096) */
#define NIC_PORT_TX_Q_SIZE 2048

#define OVS_VHOST_MAX_QUEUE_NUM 1024     /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */

#define VHOST_ENQ_RETRY_NUM 8
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

static int rte_eal_init_ret = ENODEV;
/* Quality of Service */

/* An instance of a QoS configuration. Always associated with a particular
 * netdev.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct the QoS implementation on 'netdev'. The
     * implementation should make the appropriate calls to configure QoS
     * according to 'details'. The implementation may assume that any current
     * QoS configuration already installed should be destroyed before
     * constructing the new configuration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets 'netdev->qos_conf'
     * to an initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_construct)(struct netdev *netdev, const struct smap *details);

    /* Destroys the data structures allocated by the implementation as part of
     * 'qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);

    /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     */
    int (*qos_get)(const struct netdev *netdev, struct smap *details);

    /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
     * required calls to complete the reconfiguration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function may be null if 'qos_conf' is not configurable.
     */
    int (*qos_set)(struct netdev *netdev, const struct smap *details);

    /* Modifies an array of rte_mbufs. The modification is specific to
     * each QoS implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a QoS modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
                   int pkt_cnt);
};
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};
static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
    = OVS_MUTEX_INITIALIZER;

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mp_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access. It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'concurrent_txq'). */
    int map;                       /* Mapping of configured vhost-user queues
                                    * to enabled by guest. */
};
/* dpdk has no way to remove dpdk ring ethernet devices
 * so we have to keep them around once they've been created */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;           /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    rte_spinlock_t policer_lock;
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio identifier for vhost devices */
    ovsrcu_index vid;

    /* True if vHost device is 'up' and has been reconfigured at least once */
    bool vhost_reconfigured;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    struct qos_conf *qos_conf;
    rte_spinlock_t qos_lock;

    /* The following properties cannot be changed when a device is running,
     * so we remember the request and update them next time
     * netdev_dpdk*_reconfigure() is called */
    int requested_mtu;
    int requested_n_txq;
    int requested_n_rxq;

    /* Socket ID detected when vHost device is brought up */
    int requested_socket_id;

    /* Denotes whether vHost port is client/server mode */
    uint64_t vhost_driver_flags;

    /* Ingress Policer */
    OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
    uint32_t policer_rate;
    uint32_t policer_burst;

    /* DPDK-ETH Flow control */
    struct rte_eth_fc_conf fc_conf;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                     NETDEV_DPDK_MBUF_ALIGN);
}
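/* For illustration, assuming DPDK's default RTE_PKTMBUF_HEADROOM of 128:
 * with an MTU of 1500, MTU_TO_MAX_FRAME_LEN(1500) = 1526, so the sum 1654 is
 * rounded up to the next multiple of NETDEV_DPDK_MBUF_ALIGN (1024), giving a
 * 2048-byte receive buffer. */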
/* XXX: use dpdk malloc for entire OVS. in fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_p,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *pkt = _p;

    rte_pktmbuf_init(mp, opaque_arg, _p, i);

    dp_packet_init_dpdk((struct dp_packet *) pkt, pkt->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;
    struct rte_pktmbuf_pool_private mbp_priv;

    ovs_mutex_lock(&dpdk_mp_mutex);
    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            goto out;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);
    /* XXX: this is a really rough method of provisioning memory.
     * It's impossible to determine what the exact memory requirements are
     * when the number of ports and rxqs that utilize a particular mempool can
     * change dynamically at runtime. For now, use this rough heuristic.
     */
    if (mtu >= ETHER_MTU) {
        mp_size = MAX_NB_MBUF;
    } else {
        mp_size = MIN_NB_MBUF;
    }

    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            goto fail;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        goto fail;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);

out:
    ovs_mutex_unlock(&dpdk_mp_mutex);
    return dmp;

fail:
    rte_free(dmp);
    dmp = NULL;
    goto out;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    ovs_mutex_lock(&dpdk_mp_mutex);
    ovs_assert(dmp->refcount);

    if (!--dmp->refcount) {
        ovs_list_remove(&dmp->list_node);
        rte_mempool_free(dmp->mp);
        rte_free(dmp);
    }
    ovs_mutex_unlock(&dpdk_mp_mutex);
}
/* Tries to allocate new mempool on requested_socket_id with
 * mbuf size corresponding to requested_mtu.
 * On success new configuration will be applied.
 * On error, device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
    struct dpdk_mp *mp;

    mp = dpdk_mp_get(dev->requested_socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!mp) {
        VLOG_ERR("Insufficient memory to create memory pool for netdev "
                 "%s, with MTU %d on socket %d\n",
                 dev->up.name, dev->requested_mtu, dev->requested_socket_id);
        return ENOMEM;
    } else {
        dpdk_mp_put(dev->dpdk_mp);
        dev->dpdk_mp = mp;
        dev->mtu = dev->requested_mtu;
        dev->socket_id = dev->requested_socket_id;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    }

    return 0;
}
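/* For illustration: a request for MTU 1500 yields dpdk_buf_size() = 2048,
 * and FRAME_LEN_TO_MTU(2048) = 2048 - 14 - 4 = 2030, so the mempool is keyed
 * on that "effective" MTU and is shared by every requested MTU whose
 * rounded-up buffer size works out to 2048 bytes on the same socket. */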
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
    OVS_REQUIRES(dev->mutex)
{
    int diag = 0;
    int i;
    struct rte_eth_conf conf = port_conf;

    if (dev->mtu > ETHER_MTU) {
        conf.rxmode.jumbo_frame = 1;
        conf.rxmode.max_rx_pkt_len = dev->max_packet_len;
    } else {
        conf.rxmode.jumbo_frame = 0;
        conf.rxmode.max_rx_pkt_len = 0;
    }

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request less queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
        if (diag) {
            VLOG_WARN("Interface %s eth_dev setup error %s\n",
                      dev->up.name, rte_strerror(-diag));
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with less tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with less rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag;
}
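/* For illustration of the retry loop above: if a NIC advertises 16 TX queues
 * but only 10 are actually usable, rte_eth_tx_queue_setup() fails at i == 10,
 * the loop shrinks n_txq to 10 and 'continue' re-runs
 * rte_eth_dev_configure() with the smaller count until both queue types set
 * up cleanly. */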
static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
    if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
        VLOG_WARN("Failed to enable flow control on device %d", dev->port_id);
    }
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    /* Get the Flow control configuration for DPDK-ETH */
    diag = rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
    if (diag) {
        VLOG_DBG("cannot get flow control parameters on port=%d, err=%d",
                 dev->port_id, diag);
    }

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    if (!rte_eal_init_ret) { /* Only after successful initialization */
        dev = dpdk_rte_mzalloc(sizeof *dev);
        if (dev) {
            return &dev->up;
        }
    }

    return NULL;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *dev, unsigned int n_txqs)
{
    unsigned i;

    dev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *dev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        /* Initialize map for vhost devices. */
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
        rte_spinlock_init(&dev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int sid;
    int err = 0;

    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;
    dev->requested_mtu = dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    ovsrcu_index_init(&dev->vid, -1);
    dev->vhost_reconfigured = false;

    err = netdev_dpdk_mempool_configure(dev);
    if (err) {
        goto unlock;
    }

    /* Initialise QoS configuration to NULL and qos lock to unlocked */
    dev->qos_conf = NULL;
    rte_spinlock_init(&dev->qos_lock);

    /* Initialise rcu pointer for ingress policer to NULL */
    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_rxq = NR_QUEUE;
    netdev->n_txq = NR_QUEUE;
    dev->requested_n_rxq = netdev->n_rxq;
    dev->requested_n_txq = netdev->n_txq;

    /* Initialize the flow control to NULL */
    memset(&dev->fc_conf, 0, sizeof dev->fc_conf);
    if (type == DPDK_DEV_ETH) {
        err = dpdk_eth_dev_init(dev);
        if (err) {
            goto unlock;
        }
        netdev_dpdk_alloc_txq(dev, netdev->n_txq);
    } else {
        netdev_dpdk_alloc_txq(dev, OVS_VHOST_MAX_QUEUE_NUM);
        /* Enable DPDK_DEV_VHOST device and set promiscuous mode flag. */
        dev->flags = NETDEV_UP | NETDEV_PROMISC;
    }

    ovs_list_push_back(&dpdk_list, &dev->list_node);

unlock:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
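/* For example, parsing the port name "dpdk7" with prefix "dpdk" succeeds and
 * stores 7 in '*port_no', while names such as "dpdk-7", "dpdk+7" or "xdpdk7"
 * fail with ENODEV. */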
static int
netdev_dpdk_vhost_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(dev->vhost_id, sizeof dev->vhost_id, "%s/%s",
             vhost_sock_dir, name);

    dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;
    err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 dev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
    }
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_client_construct(struct netdev *netdev)
{
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_eth_dev_stop(dev->port_id);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
}
/* rte_vhost_driver_unregister() can call back destroy_device(), which will
 * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'. To avoid a
 * deadlock, none of the mutexes must be held while calling this function. */
static int
dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
                             char *vhost_id)
    OVS_EXCLUDED(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    return rte_vhost_driver_unregister(vhost_id);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    char *vhost_id;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_vid(dev) >= 0
        && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.", dev->vhost_id);
    }

    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    vhost_id = xstrdup(dev->vhost_id);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    if (dpdk_vhost_driver_unregister(dev, vhost_id)) {
        VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n",
                 netdev->name, vhost_id);
    } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        /* OVS server mode - remove this socket from list for deletion */
        fatal_signal_remove_file_to_unlink(vhost_id);
    }
    free(vhost_id);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "mtu", "%d", dev->mtu);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args)
{
    int new_n_rxq;

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", dev->requested_n_rxq), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(&dev->up);
    }
}
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    dpdk_set_rxq_config(dev, args);

    /* Flow control support is only available for DPDK Ethernet ports. */
    bool rx_fc_en = false;
    bool tx_fc_en = false;
    enum rte_eth_fc_mode fc_mode_set[2][2] =
        {{RTE_FC_NONE,     RTE_FC_TX_PAUSE},
         {RTE_FC_RX_PAUSE, RTE_FC_FULL}
        };
    rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
    tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
    dev->fc_conf.autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);
    dev->fc_conf.mode = fc_mode_set[tx_fc_en][rx_fc_en];

    dpdk_eth_flow_ctrl_setup(dev);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
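/* In netdev_dpdk_set_config() above, fc_mode_set is indexed as
 * [tx_fc_en][rx_fc_en]: with both options false it yields RTE_FC_NONE and
 * with both true RTE_FC_FULL; the mixed cases select RTE_FC_TX_PAUSE
 * (rx-flow-ctrl only) and RTE_FC_RX_PAUSE (tx-flow-ctrl only)
 * respectively. */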
static int
netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_set_rxq_config(dev, args);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_client_set_config(struct netdev *netdev,
                                    const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *path;

    ovs_mutex_lock(&dev->mutex);
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        path = smap_get(args, "vhost-server-path");
        if (path && strcmp(path, dev->vhost_id)) {
            strcpy(dev->vhost_id, path);
            netdev_request_reconfigure(netdev);
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}
/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
static inline void
netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
                         struct rte_mbuf **pkts, int cnt)
{
    uint32_t nb_tx = 0;

    while (nb_tx != cnt) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, cnt - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != cnt)) {
        /* Free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < cnt; i++) {
            rte_pktmbuf_free(pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }
}
static inline bool
netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
                               struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}
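/* Note that the metered length excludes the Ethernet header: assuming the
 * usual 14-byte struct ether_hdr, a 64-byte frame is charged against the
 * srTCM bucket as 64 - 14 = 50 bytes, so configured policer rates apply to
 * the frame minus its L2 header rather than the full wire size. */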
static int
netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
                        struct rte_mbuf **pkts, int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet */
        if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}
static int
ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
                    int pkt_cnt)
{
    int cnt = 0;

    rte_spinlock_lock(&policer->policer_lock);
    cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts, pkt_cnt);
    rte_spinlock_unlock(&policer->policer_lock);

    return cnt;
}
static bool
is_vhost_running(struct netdev_dpdk *dev)
{
    return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
}
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
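/* The branches above implement the usual RMON-style size histogram with
 * buckets 0-64, 65-127, 128-255, 256-511, 512-1023, 1024-1522 and 1523+,
 * matching the XSTAT_RX_*_PACKETS counter names defined earlier.  For
 * example, a standard 1518-byte frame lands in rx_1024_to_1522_packets. */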
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count,
                                     int dropped)
{
    int i;
    unsigned int packet_size;
    struct dp_packet *packet;

    stats->rx_packets += count;
    stats->rx_dropped += dropped;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet_batch *batch)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    int qid = rxq->queue_id;
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t nb_rx = 0;
    uint16_t dropped = 0;

    if (OVS_UNLIKELY(!is_vhost_running(dev)
                     || !(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(netdev_dpdk_get_vid(dev),
                                    qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) batch->packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
                                         nb_rx, dropped);
    rte_spinlock_unlock(&dev->stats_lock);

    batch->count = (int) nb_rx;
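    return 0;
}

/* Virtqueue indexing: virtio pairs RX/TX queues, so OVS queue 'qid' maps to
 * virtqueue qid * VIRTIO_QNUM + VIRTIO_TXQ when dequeuing (the guest's TX
 * ring) and qid * VIRTIO_QNUM + VIRTIO_RXQ when enqueuing.  With
 * VIRTIO_QNUM == 2, receiving on qid 1 reads virtqueue 1 * 2 + 1 = 3. */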
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    int nb_rx;
    int dropped = 0;

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) batch->packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    /* Update stats to reflect dropped packets */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    batch->count = nb_rx;

    return 0;
}
static inline int
netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                      int cnt)
{
    struct netdev *netdev = &dev->up;

    if (dev->qos_conf != NULL) {
        rte_spinlock_lock(&dev->qos_lock);
        if (dev->qos_conf != NULL) {
            cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
        }
        rte_spinlock_unlock(&dev->qos_lock);
    }

    return cnt;
}
static int
netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                              int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt;

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        if (OVS_UNLIKELY(pkt->pkt_len > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " max_packet_len %d",
                         dev->up.name, pkt->pkt_len, dev->max_packet_len);
            rte_pktmbuf_free(pkt);
            continue;
        }

        if (OVS_UNLIKELY(i != cnt)) {
            pkts[cnt] = pkt;
        }
        cnt++;
    }

    return cnt;
}
static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int dropped = 0;
    int i, retries = 0;

    qid = dev->tx_q[qid % netdev->n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(dev) || qid < 0
                     || !(dev->flags & NETDEV_UP))) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    /* Check whether QoS has been configured for the netdev */
    cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);
    dropped = total_pkts - cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(netdev_dpdk_get_vid(dev),
                                          vhost_qid, cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible retry.*/
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            /* No packets sent - do not retry.*/
            break;
        }
    } while (cnt && (retries++ <= VHOST_ENQ_RETRY_NUM));

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
                                         cnt + dropped);
    rte_spinlock_unlock(&dev->stats_lock);

out:
    for (i = 0; i < total_pkts - dropped; i++) {
        dp_packet_delete(pkts[i]);
    }
}
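/* The enqueue loop above retries at most VHOST_ENQ_RETRY_NUM (8) times when
 * the guest's virtqueue is full but still draining: e.g. a 32-packet burst
 * that enqueues 20 packets on the first attempt retries with the remaining
 * 12, and gives up early only when an attempt enqueues nothing at all. */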
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = batch->count;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    dp_packet_batch_apply_cutlen(batch);

    for (i = 0; i < batch->count; i++) {
        int size = dp_packet_size(batch->packets[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);
            dropped++;
            continue;
        }

        pkts[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!pkts[newcnt]) {
            dropped += batch->count - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(pkts[newcnt], void *),
               dp_packet_data(batch->packets[i]), size);

        rte_pktmbuf_data_len(pkts[newcnt]) = size;
        rte_pktmbuf_pkt_len(pkts[newcnt]) = size;

        newcnt++;
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) pkts,
                                 newcnt);
    } else {
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run__(dev, pkts, newcnt);

        dropped += qos_pkts - newcnt;
        netdev_dpdk_eth_tx_burst(dev, qid, pkts, newcnt);
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                       struct dp_packet_batch *batch,
                       bool may_steal, bool concurrent_txq OVS_UNUSED)
{
    if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        dp_packet_batch_apply_cutlen(batch);
        __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet_batch *batch, bool may_steal,
                   bool concurrent_txq)
{
    if (OVS_UNLIKELY(concurrent_txq)) {
        qid = qid % dev->up.n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     batch->packets[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        int dropped;
        int cnt = batch->count;
        struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;

        dp_packet_batch_apply_cutlen(batch);

        cnt = netdev_dpdk_filter_packet_len(dev, pkts, cnt);
        cnt = netdev_dpdk_qos_run__(dev, pkts, cnt);
        dropped = batch->count - cnt;

        netdev_dpdk_eth_tx_burst(dev, qid, pkts, cnt);

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(concurrent_txq)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet_batch *batch, bool may_steal,
                     bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    if (MTU_TO_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
        || mtu < ETHER_MIN_MTU) {
        VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu);
        return EINVAL;
    }

    ovs_mutex_lock(&dev->mutex);
    if (dev->requested_mtu != mtu) {
        dev->requested_mtu = mtu;
        netdev_request_reconfigure(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstat *xstats,
                           const struct rte_eth_xstat_name *names,
                           const unsigned int size)
{
    for (unsigned int i = 0; i < size; i++) {
        if (strcmp(XSTAT_RX_64_PACKETS, names[i].name) == 0) {
            stats->rx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->rx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->rx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->rx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->rx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->rx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->rx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_64_PACKETS, names[i].name) == 0) {
            stats->tx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->tx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->tx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->tx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->tx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->tx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->tx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, names[i].name) == 0) {
            stats->tx_multicast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->rx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->tx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, names[i].name) == 0) {
            stats->rx_undersized_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, names[i].name) == 0) {
            stats->rx_fragmented_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, names[i].name) == 0) {
            stats->rx_jabber_errors = xstats[i].value;
        }
    }
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstat *rte_xstats = NULL;
    struct rte_eth_xstat_name *rte_xstats_names = NULL;
    int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    /* Get length of statistics */
    rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
    if (rte_xstats_len < 0) {
        VLOG_WARN("Cannot get XSTATS values for port: %i", dev->port_id);
        goto out;
    }

    /* Reserve memory for xstats names and values */
    rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
    rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);

    /* Retrieve xstats names */
    rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
                                                  rte_xstats_names,
                                                  rte_xstats_len);
    if (rte_xstats_new_len != rte_xstats_len) {
        VLOG_WARN("Cannot get XSTATS names for port: %i.", dev->port_id);
        goto out;
    }

    /* Retrieve xstats values */
    memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
    rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                        rte_xstats_len);
    if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
        netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
                                   rte_xstats_len);
    } else {
        VLOG_WARN("Cannot get XSTATS values for port: %i.", dev->port_id);
    }

out:
    free(rte_xstats);
    free(rte_xstats_names);

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
    }

    return 0;
}
static struct ingress_policer *
netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
{
    struct ingress_policer *policer = NULL;
    uint64_t rate_bytes;
    uint64_t burst_bytes;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    rte_spinlock_init(&policer->policer_lock);

    /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
    rate_bytes = rate * 1000/8;
    burst_bytes = burst * 1000/8;

    policer->app_srtcm_params.cir = rate_bytes;
    policer->app_srtcm_params.cbs = burst_bytes;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->in_policer,
                                 &policer->app_srtcm_params);
    if (err) {
        VLOG_ERR("Could not create rte meter for ingress policer");
        free(policer);
        return NULL;
    }

    return policer;
}
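/* For illustration of the conversion above: a rate of 10000 kbit/s becomes
 * 10000 * 1000 / 8 = 1,250,000 bytes/s for the srTCM committed information
 * rate (CIR), and a burst of 8000 kbit becomes a 1,000,000-byte committed
 * burst size (CBS). */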
static int
netdev_dpdk_set_policing(struct netdev* netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;

    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value.
     */
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
                     : policer_burst);

    ovs_mutex_lock(&dev->mutex);

    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);

    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    }

    /* Destroy any existing ingress policer for the device if one exists */
    if (policer) {
        ovsrcu_postpone(free, policer);
    }

    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    } else {
        policer = NULL;
    }
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
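/* Examples of the burst defaulting above: a rate of 0 clears the policer
 * entirely regardless of the burst argument; rate 2000 with burst 0 installs
 * 2000 kbit/s with the default 8000 kbit burst; and rate 2000 with burst
 * 4000 keeps the caller's 4000 kbit burst unchanged. */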
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    } else {
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
         * update. */

        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(dev)) {
            netdev_change_seq_changed(&dev->up);

            /* Clear statistics if device is getting up. */
            if (NETDEV_UP & on) {
                rte_spinlock_lock(&dev->stats_lock);
                memset(&dev->stats, 0, sizeof dev->stats);
                rte_spinlock_unlock(&dev->stats_lock);
            }
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}
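
/* Illustrative only: these keys surface in the Interface table's status
 * column, so a command such as
 *   ovs-vsctl get Interface dpdk0 status
 * might print {driver_name=..., max_rx_pktlen=..., numa_id=..., ...}.
 * The exact values depend on what the PMD reports through
 * rte_eth_dev_info_get(). */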
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(int vid)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < rte_vhost_get_queue_num(vid); i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_TXQ, 0);
    }
}
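
/* Virtio numbers vrings in RX/TX pairs: vring 'queue_id' belongs to device
 * queue queue_id / VIRTIO_QNUM, and queue_id % VIRTIO_QNUM distinguishes
 * VIRTIO_RXQ from VIRTIO_TXQ.  For example, with VIRTIO_QNUM == 2, vring 5
 * is the TX half of device queue 2.  That is why the loop above advances the
 * notification index by VIRTIO_QNUM per device queue. */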
/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->up.n_txq;

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    }

    rte_free(enabled_queues);
}
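
/* Worked example (illustrative): with up.n_txq == 4 and only queues 0 and 2
 * enabled by the guest, enabled_queues becomes [0, 2] and the disabled
 * queues are assigned round-robin, giving the mapping
 *   0 --> 0, 1 --> 0, 2 --> 2, 3 --> 2
 * so traffic destined for a disabled txq is redirected to an enabled one. */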
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int newnode = 0;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            uint32_t qp_num = rte_vhost_get_queue_num(vid);

            /* Get NUMA information */
            newnode = rte_vhost_get_numa_node(vid);
            if (newnode == -1) {
                VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
                          ifname);
                newnode = dev->socket_id;
            }

            if (dev->requested_n_txq != qp_num
                || dev->requested_n_rxq != qp_num
                || dev->requested_socket_id != newnode) {
                dev->requested_socket_id = newnode;
                dev->requested_n_rxq = qp_num;
                dev->requested_n_txq = qp_num;
                netdev_request_reconfigure(&dev->up);
            } else {
                /* Reconfiguration not required. */
                dev->vhost_reconfigured = true;
            }

            ovsrcu_index_set(&dev->vid, vid);
            exists = true;

            /* Disable notifications. */
            set_irq_status(vid);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' has been added on numa node %i",
              ifname, newnode);

    return 0;
}
/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->up.n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}
/*
 * Remove a virtio-net device from the specific vhost port. Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_vid(dev) == vid) {

            ovs_mutex_lock(&dev->mutex);
            dev->vhost_reconfigured = false;
            ovsrcu_index_set(&dev->vid, -1);
            netdev_dpdk_txq_map_clear(dev);

            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            exists = true;
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' has been removed", ifname);
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
    }
}
static int
vring_state_changed(int vid, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            if (enable) {
                dev->tx_q[qid].map = qid;
            } else {
                dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
            }
            netdev_dpdk_remap_txqs(dev);
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' "
                  "changed to '%s'", queue_id, qid, ifname,
                  (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
        return -1;
    }

    return 0;
}
int
netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
{
    return ovsrcu_index_get(&dev->vid);
}

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
}
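
/* Both 'vid' and the ingress policer are published through RCU primitives so
 * that PMD threads in the datapath can read them without taking dev->mutex;
 * writers (the vhost callbacks above and the configuration path) update them
 * under the mutex and defer freeing with ovsrcu_postpone(). */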
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};
static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
     pthread_detach(pthread_self());
     /* Put the vhost thread into quiescent state. */
     ovsrcu_quiesce_start();
     rte_vhost_driver_session_start();
     return NULL;
}
static void
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
                              | 1ULL << VIRTIO_NET_F_HOST_TSO6
                              | 1ULL << VIRTIO_NET_F_CSUM);

    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
}
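
/* Presumably, masking the host TSO and checksum features here keeps guests
 * from ever negotiating those offloads, so vhost ports only hand complete,
 * checksummed frames to the datapath. */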
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);
}
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[RTE_RING_NAMESIZE];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, sizeof ring_name, "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, sizeof ring_name, "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            /* Really all that is needed */
            *eth_port_id = ivshmem->eth_port_id;
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
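
/* Naming example (illustrative): a port named "dpdkr0" parses to user port
 * number 0 and, if no existing entry matches, dpdk_ring_create() above
 * builds rings named "dpdkr0_tx" and "dpdkr0_rx" backing the new ring
 * ethdev. */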
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
                      struct dp_packet_batch *batch, bool may_steal,
                      bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear. This is because the same mbuf may be
     * modified by the consumer of the ring and returned into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < batch->count; i++) {
        dp_packet_rss_invalidate(batch->packets[i]);
    }

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
}
/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations qos_name to name. Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match is found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }
    return NULL;
}
/*
 * Call qos_destruct to clean up items associated with the netdev's
 * qos_conf. Set the netdev's qos_conf to NULL.
 */
static void
qos_delete_conf(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_spinlock_lock(&dev->qos_lock);
    if (dev->qos_conf) {
        if (dev->qos_conf->ops->qos_destruct) {
            dev->qos_conf->ops->qos_destruct(netdev, dev->qos_conf);
        }
        dev->qos_conf = NULL;
    }
    rte_spinlock_unlock(&dev->qos_lock);
}
static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }
    return 0;
}
static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    if (dev->qos_conf) {
        *typep = dev->qos_conf->ops->qos_name;
        error = (dev->qos_conf->ops->qos_get
                 ? dev->qos_conf->ops->qos_get(netdev, details) : 0);
    } else {
        /* No QoS configuration set, return an empty string */
        *typep = "";
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_set_qos(struct netdev *netdev,
                    const char *type, const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    int error = 0;

    /* If type is empty or unsupported then the current QoS configuration
     * for the dpdk-netdev can be destroyed */
    new_ops = qos_lookup_name(type);

    if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
        qos_delete_conf(netdev);
        return EOPNOTSUPP;
    }

    ovs_mutex_lock(&dev->mutex);

    if (dev->qos_conf) {
        if (new_ops == dev->qos_conf->ops) {
            error = new_ops->qos_set ? new_ops->qos_set(netdev, details) : 0;
        } else {
            /* Delete existing QoS configuration. */
            qos_delete_conf(netdev);
            ovs_assert(dev->qos_conf == NULL);

            /* Install new QoS configuration. */
            error = new_ops->qos_construct(netdev, details);
        }
    } else {
        error = new_ops->qos_construct(netdev, details);
    }

    ovs_assert((error == 0) == (dev->qos_conf != NULL));
    if (error) {
        VLOG_ERR("Failed to set QoS type %s on port %s, returned error: %s",
                 type, netdev->name, rte_strerror(-error));
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};

static struct egress_policer *
egress_policer_get__(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    return CONTAINER_OF(dev->qos_conf, struct egress_policer, qos_conf);
}
static int
egress_policer_qos_construct(struct netdev *netdev,
                             const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct egress_policer *policer;
    int err = 0;

    rte_spinlock_lock(&dev->qos_lock);
    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    dev->qos_conf = &policer->qos_conf;
    policer->app_srtcm_params.cir = smap_get_ullong(details, "cir", 0);
    policer->app_srtcm_params.cbs = smap_get_ullong(details, "cbs", 0);
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);

    if (err < 0) {
        /* Error occurred during rte_meter creation, destroy the policer
         * and set the qos configuration for the netdev dpdk to NULL
         */
        free(policer);
        dev->qos_conf = NULL;
        err = -err;
    }
    rte_spinlock_unlock(&dev->qos_lock);

    return err;
}
static void
egress_policer_qos_destruct(struct netdev *netdev OVS_UNUSED,
                            struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}
static int
egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
{
    struct egress_policer *policer = egress_policer_get__(netdev);

    smap_add_format(details, "cir", "%llu",
                    1ULL * policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%llu",
                    1ULL * policer->app_srtcm_params.cbs);

    return 0;
}
static int
egress_policer_qos_set(struct netdev *netdev, const struct smap *details)
{
    struct egress_policer *policer;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    policer = egress_policer_get__(netdev);
    rte_spinlock_lock(&dev->qos_lock);
    policer->app_srtcm_params.cir = smap_get_ullong(details, "cir", 0);
    policer->app_srtcm_params.cbs = smap_get_ullong(details, "cbs", 0);
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);

    if (err < 0) {
        /* Error occurred during rte_meter reconfiguration, destroy the
         * policer and set the qos configuration for the netdev dpdk to NULL
         */
        free(policer);
        dev->qos_conf = NULL;
        err = -err;
    }
    rte_spinlock_unlock(&dev->qos_lock);

    return err;
}
static int
egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts, int pkt_cnt)
{
    int cnt = 0;
    struct egress_policer *policer = egress_policer_get__(netdev);

    cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);

    return cnt;
}
static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_set,
    egress_policer_run
};
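
/* Illustrative database configuration for this QoS type (the "cir" and
 * "cbs" keys match the srTCM parameters read above):
 *
 *   ovs-vsctl set port vhost-user0 qos=@newqos -- \
 *     --id=@newqos create qos type=egress-policer \
 *     other-config:cir=46000000 other-config:cbs=2048
 */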
static int
netdev_dpdk_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dev->mutex);

    if (netdev->n_txq == dev->requested_n_txq
        && netdev->n_rxq == dev->requested_n_rxq
        && dev->mtu == dev->requested_mtu) {
        /* Reconfiguration is unnecessary */
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    if (dev->mtu != dev->requested_mtu) {
        netdev_dpdk_mempool_configure(dev);
    }

    netdev->n_txq = dev->requested_n_txq;
    netdev->n_rxq = dev->requested_n_rxq;

    rte_free(dev->tx_q);
    err = dpdk_eth_dev_init(dev);
    netdev_dpdk_alloc_txq(dev, netdev->n_txq);

    netdev_change_seq_changed(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
static void
dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    dev->up.n_txq = dev->requested_n_txq;
    dev->up.n_rxq = dev->requested_n_rxq;

    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    netdev_dpdk_remap_txqs(dev);

    if (dev->requested_socket_id != dev->socket_id
        || dev->requested_mtu != dev->mtu) {
        if (!netdev_dpdk_mempool_configure(dev)) {
            netdev_change_seq_changed(&dev->up);
        }
    }

    if (netdev_dpdk_get_vid(dev) >= 0) {
        dev->vhost_reconfigured = true;
    }
}
static int
netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_vhost_reconfigure_helper(dev);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;

    ovs_mutex_lock(&dev->mutex);

    dpdk_vhost_reconfigure_helper(dev);

    /* Configure vHost client mode if requested and if the following criteria
     * are met:
     *  1. Device hasn't been registered yet.
     *  2. A path has been specified.
     */
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)
            && strlen(dev->vhost_id)) {
        /* Register client-mode device */
        err = rte_vhost_driver_register(dev->vhost_id,
                                        RTE_VHOST_USER_CLIENT);
        if (err) {
            VLOG_ERR("vhost-user device setup failure for device %s\n",
                     dev->vhost_id);
        } else {
            /* Configuration successful */
            dev->vhost_driver_flags |= RTE_VHOST_USER_CLIENT;
            VLOG_INFO("vHost User device '%s' created in 'client' mode, "
                      "using client socket '%s'",
                      dev->up.name, dev->vhost_id);
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
#define NETDEV_DPDK_CLASS(NAME, CONSTRUCT, DESTRUCT,          \
                          SET_CONFIG, SET_TX_MULTIQ, SEND,    \
                          GET_CARRIER, GET_STATS,             \
                          GET_FEATURES, GET_STATUS,           \
                          RECONFIGURE, RXQ_RECV)              \
{                                                             \
    NAME,                                                     \
    true,                       /* is_pmd */                  \
    NULL,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    SET_CONFIG,                                               \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    SET_TX_MULTIQ,                                            \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    netdev_dpdk_set_policing,                                 \
    netdev_dpdk_get_qos_types,                                \
    NULL,                       /* get_qos_capabilities */    \
    netdev_dpdk_get_qos,                                      \
    netdev_dpdk_set_qos,                                      \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_addr_list */           \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
    RECONFIGURE,                                              \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
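
/* Each NETDEV_DPDK_CLASS() expansion fills in one 'struct netdev_class'
 * initializer in field order; the NULL slots are callbacks that the DPDK
 * providers do not implement (tunnel header handling, queue dumping, and so
 * on).  The four instantiations appear near the end of this file. */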
static bool
process_vhost_flags(char *flag, char *default_val, int size,
                    const struct smap *ovs_other_config,
                    char **new_val)
{
    const char *val;
    int changed = 0;

    val = smap_get(ovs_other_config, flag);

    /* Process the vhost-sock-dir flag if it is provided, otherwise resort to
     * the default value.
     */
    if (val && (strlen(val) <= size)) {
        changed = 1;
        *new_val = xstrdup(val);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
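
/* Example (illustrative): setting other_config:vhost-sock-dir=myrun makes
 * process_vhost_flags() return the subcomponent "myrun", which dpdk_init__()
 * below joins with ovs_rundir(), so vhost sockets end up under
 * $OVS_RUNDIR/myrun. */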
static char **
grow_argv(char ***argv, size_t cur_siz, size_t grow_by)
{
    return xrealloc(*argv, sizeof(char *) * (cur_siz + grow_by));
}
static void
dpdk_option_extend(char ***argv, int argc, const char *option,
                   const char *value)
{
    char **newargv = grow_argv(argv, argc, 2);
    *argv = newargv;
    newargv[argc] = xstrdup(option);
    newargv[argc+1] = xstrdup(value);
}
static char **
move_argv(char ***argv, size_t cur_size, char **src_argv, size_t src_argc)
{
    char **newargv = grow_argv(argv, cur_size, src_argc);
    while (src_argc--) {
        newargv[cur_size+src_argc] = src_argv[src_argc];
        src_argv[src_argc] = NULL;
    }
    return newargv;
}
static int
extra_dpdk_args(const char *ovs_extra_config, char ***argv, int argc)
{
    int ret = argc;
    char *release_tok = xstrdup(ovs_extra_config);
    char *tok, *endptr = NULL;

    for (tok = strtok_r(release_tok, " ", &endptr); tok != NULL;
         tok = strtok_r(NULL, " ", &endptr)) {
        char **newarg = grow_argv(argv, ret, 1);
        *argv = newarg;
        newarg[ret++] = xstrdup(tok);
    }

    free(release_tok);
    return ret;
}
static bool
argv_contains(char **argv_haystack, const size_t argc_haystack,
              const char *needle)
{
    for (size_t i = 0; i < argc_haystack; ++i) {
        if (!strcmp(argv_haystack[i], needle)) {
            return true;
        }
    }
    return false;
}
static int
construct_dpdk_options(const struct smap *ovs_other_config,
                       char ***argv, const int initial_size,
                       char **extra_args, const size_t extra_argc)
{
    struct dpdk_options_map {
        const char *ovs_configuration;
        const char *dpdk_option;
        bool default_enabled;
        const char *default_value;
    } opts[] = {
        {"dpdk-lcore-mask", "-c", false, NULL},
        {"dpdk-hugepage-dir", "--huge-dir", false, NULL},
    };

    int i, ret = initial_size;

    /* First, construct from the flat options (non-mutually-exclusive). */
    for (i = 0; i < ARRAY_SIZE(opts); ++i) {
        const char *lookup = smap_get(ovs_other_config,
                                      opts[i].ovs_configuration);
        if (!lookup && opts[i].default_enabled) {
            lookup = opts[i].default_value;
        }

        if (lookup) {
            if (!argv_contains(extra_args, extra_argc, opts[i].dpdk_option)) {
                dpdk_option_extend(argv, ret, opts[i].dpdk_option, lookup);
                ret += 2;
            } else {
                VLOG_WARN("Ignoring database defined option '%s' due to "
                          "dpdk_extras config", opts[i].dpdk_option);
            }
        }
    }

    return ret;
}
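
/* Mapping example (illustrative): other_config:dpdk-lcore-mask=0x2 becomes
 * the EAL arguments "-c 0x2", and other_config:dpdk-hugepage-dir=/dev/hp
 * becomes "--huge-dir /dev/hp" - unless the same EAL option already appears
 * in dpdk-extra, in which case the database value is dropped with a
 * warning. */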
#define MAX_DPDK_EXCL_OPTS 10

static int
construct_dpdk_mutex_options(const struct smap *ovs_other_config,
                             char ***argv, const int initial_size,
                             char **extra_args, const size_t extra_argc)
{
    struct dpdk_exclusive_options_map {
        const char *category;
        const char *ovs_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *eal_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *default_value;
        int default_option;
    } excl_opts[] = {
        {"memory type",
         {"dpdk-alloc-mem", "dpdk-socket-mem", NULL,},
         {"-m",             "--socket-mem",    NULL,},
         "1024,0", 1
        },
    };

    int i, ret = initial_size;
    for (i = 0; i < ARRAY_SIZE(excl_opts); ++i) {
        int found_opts = 0, scan, found_pos = -1;
        const char *found_value;
        struct dpdk_exclusive_options_map *popt = &excl_opts[i];

        for (scan = 0; scan < MAX_DPDK_EXCL_OPTS
                 && popt->ovs_dpdk_options[scan]; ++scan) {
            const char *lookup = smap_get(ovs_other_config,
                                          popt->ovs_dpdk_options[scan]);
            if (lookup && strlen(lookup)) {
                found_opts++;
                found_pos = scan;
                found_value = lookup;
            }
        }

        if (!found_opts) {
            if (popt->default_option) {
                found_pos = popt->default_option;
                found_value = popt->default_value;
            } else {
                continue;
            }
        }

        if (found_opts > 1) {
            VLOG_ERR("Multiple defined options for %s. Please check your"
                     " database settings and reconfigure if necessary.",
                     popt->category);
        }

        if (!argv_contains(extra_args, extra_argc,
                           popt->eal_dpdk_options[found_pos])) {
            dpdk_option_extend(argv, ret, popt->eal_dpdk_options[found_pos],
                               found_value);
            ret += 2;
        } else {
            VLOG_WARN("Ignoring database defined option '%s' due to "
                      "dpdk_extras config", popt->eal_dpdk_options[found_pos]);
        }
    }

    return ret;
}
static int
get_dpdk_args(const struct smap *ovs_other_config, char ***argv,
              int argc)
{
    const char *extra_configuration;
    char **extra_args = NULL;
    int i;
    size_t extra_argc = 0;

    extra_configuration = smap_get(ovs_other_config, "dpdk-extra");
    if (extra_configuration) {
        extra_argc = extra_dpdk_args(extra_configuration, &extra_args, 0);
    }

    i = construct_dpdk_options(ovs_other_config, argv, argc, extra_args,
                               extra_argc);
    i = construct_dpdk_mutex_options(ovs_other_config, argv, i, extra_args,
                                     extra_argc);

    if (extra_configuration) {
        *argv = move_argv(argv, i, extra_args, extra_argc);
    }

    return i + extra_argc;
}
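
/* Putting it together (illustrative): with dpdk-lcore-mask unset,
 * other_config:dpdk-socket-mem=1024,0 and other_config:dpdk-extra="-n 4",
 * the arguments assembled after argv[0] would be
 *   "--socket-mem" "1024,0" "-n" "4"
 * with the dpdk-extra tokens always appended last by move_argv(). */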
static char **dpdk_argv;
static int dpdk_argc;

static void
deferred_argv_release(void)
{
    int result;

    for (result = 0; result < dpdk_argc; ++result) {
        free(dpdk_argv[result]);
    }

    free(dpdk_argv);
}
static void
dpdk_init__(const struct smap *ovs_other_config)
{
    char **argv = NULL;
    int result;
    int argc, argc_tmp;
    bool auto_determine = true;
    int err = 0;
    cpu_set_t cpuset;
    char *sock_dir_subcomponent;

    if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
        VLOG_INFO("DPDK Disabled - to change this requires a restart.\n");
        return;
    }

    VLOG_INFO("DPDK Enabled, initializing");
    if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
                            NAME_MAX, ovs_other_config,
                            &sock_dir_subcomponent)) {
        struct stat s;

        if (!strstr(sock_dir_subcomponent, "..")) {
            vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
                                       sock_dir_subcomponent);

            err = stat(vhost_sock_dir, &s);
            if (err) {
                VLOG_ERR("vhost-user sock directory '%s' does not exist.",
                         vhost_sock_dir);
            }
        } else {
            vhost_sock_dir = xstrdup(ovs_rundir());
            VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid "
                     "characters '..' - using %s instead.",
                     ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
        }
        free(sock_dir_subcomponent);
    } else {
        vhost_sock_dir = sock_dir_subcomponent;
    }

    argv = grow_argv(&argv, 0, 1);
    argc = 1;
    argv[0] = xstrdup(ovs_get_program_name());
    argc_tmp = get_dpdk_args(ovs_other_config, &argv, argc);

    while (argc_tmp != argc) {
        if (!strcmp("-c", argv[argc]) || !strcmp("-l", argv[argc])) {
            auto_determine = false;
        }
        argc++;
    }

    /**
     * NOTE: This is an unsophisticated mechanism for determining the DPDK
     * lcore for the DPDK Master.
     */
    if (auto_determine) {
        int i;

        /* Get the main thread affinity */
        CPU_ZERO(&cpuset);
        err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (!err) {
            for (i = 0; i < CPU_SETSIZE; i++) {
                if (CPU_ISSET(i, &cpuset)) {
                    argv = grow_argv(&argv, argc, 2);
                    argv[argc++] = xstrdup("-c");
                    argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
                    i = CPU_SETSIZE;
                }
            }
        } else {
            VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
            /* User did not set dpdk-lcore-mask and unable to get current
             * thread affinity - default to core 0x1 */
            argv = grow_argv(&argv, argc, 2);
            argv[argc++] = xstrdup("-c");
            argv[argc++] = xasprintf("0x%X", 1);
        }
    }

    argv = grow_argv(&argv, argc, 1);
    argv[argc] = NULL;

    if (VLOG_IS_INFO_ENABLED()) {
        struct ds eal_args;
        int opt;

        ds_init(&eal_args);
        ds_put_cstr(&eal_args, "EAL ARGS:");
        for (opt = 0; opt < argc; ++opt) {
            ds_put_cstr(&eal_args, " ");
            ds_put_cstr(&eal_args, argv[opt]);
        }
        VLOG_INFO("%s", ds_cstr_ro(&eal_args));
        ds_destroy(&eal_args);
    }

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    /* Set the main thread affinity back to pre rte_eal_init() value */
    if (auto_determine && !err) {
        err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (err) {
            VLOG_ERR("Thread setaffinity error %d", err);
        }
    }

    dpdk_argv = argv;
    dpdk_argc = argc;

    atexit(deferred_argv_release);

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    /* We are called from the main thread here */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

    dpdk_vhost_class_init();

    VLOG_INFO("DPDK pdump packet capture enabled");
    err = rte_pdump_init(ovs_rundir());
    if (err) {
        VLOG_INFO("Error initialising DPDK pdump");
        rte_pdump_uninit();
    } else {
        char *server_socket_path;

        server_socket_path = xasprintf("%s/%s", ovs_rundir(),
                                       "pdump_server_socket");
        fatal_signal_add_file_to_unlink(server_socket_path);
        free(server_socket_path);
    }

    /* Finally, register the dpdk classes */
    netdev_dpdk_register();
}
void
dpdk_init(const struct smap *ovs_other_config)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovs_other_config && ovsthread_once_start(&once)) {
        dpdk_init__(ovs_other_config);
        ovsthread_once_done(&once);
    }
}
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_ring_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        NULL,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_reconfigure,
        netdev_dpdk_vhost_rxq_recv);
static const struct netdev_class dpdk_vhost_client_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuserclient",
        netdev_dpdk_vhost_client_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_client_set_config,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_client_reconfigure,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    dpdk_common_init();
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_ring_class);
    netdev_register_provider(&dpdk_vhost_class);
    netdev_register_provider(&dpdk_vhost_client_class);
}
void
dpdk_set_lcore_id(unsigned cpu)
{
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;
}

bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}