/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include "netdev-dpdk.h"

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_eth_ring.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_meter.h>
#include <rte_virtio_net.h>

#include "dp-packet.h"
#include "dpdk.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "openvswitch/shash.h"
#include "smap.h"
#include "unaligned.h"
#include "util.h"
VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (ETHER_HDR_LEN + ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len)                    \
                                     - ETHER_HDR_LEN - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              (MTU_TO_MAX_FRAME_LEN(mtu)      \
                                     + sizeof(struct dp_packet)     \
                                     + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN      1024
#define NETDEV_DPDK_MAX_PKT_LEN     9728
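/* Worked example for the macros above, assuming the standard 14-byte
 * Ethernet header and 4-byte CRC: MTU_TO_FRAME_LEN(1500) = 1518, while
 * MTU_TO_MAX_FRAME_LEN(1500) = 1526 because ETHER_HDR_MAX_LEN also leaves
 * room for two 4-byte VLAN tags (QinQ).  FRAME_LEN_TO_MTU() is the exact
 * inverse of MTU_TO_FRAME_LEN(). */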
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */

#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ.  This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
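/* Concretely: MAX_NB_MBUF / MIN_NB_MBUF = (4096 * 64) / (4096 * 4) = 16,
 * already a power of two, so ROUND_DOWN_POW2() leaves it unchanged.  The
 * asserts thus check that 262144 is divisible by 16 and that the smallest
 * pool we will ever request, 262144 / 16 = 16384 mbufs, is a multiple of
 * MP_CACHE_SZ. */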
/*
 * DPDK XSTATS Counter names definition
 */
#define XSTAT_RX_64_PACKETS              "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS       "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS      "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS      "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS     "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS    "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS     "rx_size_1523_to_max_packets"

#define XSTAT_TX_64_PACKETS              "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS       "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS      "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS      "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS     "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS    "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS     "tx_size_1523_to_max_packets"

#define XSTAT_TX_MULTICAST_PACKETS       "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS       "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS       "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS       "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS         "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS       "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS           "rx_jabber_errors"
/* Default size of Physical NIC RXQ */
#define NIC_PORT_DEFAULT_RXQ_SIZE 2048
/* Default size of Physical NIC TXQ */
#define NIC_PORT_DEFAULT_TXQ_SIZE 2048
/* Maximum size of Physical NIC Queues */
#define NIC_PORT_MAX_Q_SIZE 4096

#define OVS_VHOST_MAX_QUEUE_NUM 1024     /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */

#define VHOST_ENQ_RETRY_NUM 8
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_key = NULL,
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};
/* Quality of Service */

/* An instance of a QoS configuration.  Always associated with a particular
 * network device.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
    rte_spinlock_t lock;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted.  All of them must be provided,
 * except where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct a qos_conf object.  The implementation should make
     * the appropriate calls to configure QoS according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets '*conf' to an
     * initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_construct)(const struct smap *details, struct qos_conf **conf);

    /* Destroys the data structures allocated by the implementation as part of
     * 'conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    void (*qos_destruct)(struct qos_conf *conf);

    /* Retrieves details of 'conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     */
    int (*qos_get)(const struct qos_conf *conf, struct smap *details);

    /* Returns true if 'conf' is already configured according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * For all QoS implementations it should always be non-null.
     */
    bool (*qos_is_equal)(const struct qos_conf *conf,
                         const struct smap *details);

    /* Modify an array of rte_mbufs.  The modification is specific to
     * each qos implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array.  This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf **pkts,
                   int pkt_cnt);
};
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};
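/* 'qos_confs' is the registry consulted when a QoS type is configured on a
 * port: a new implementation is wired in simply by defining its
 * 'struct dpdk_qos_ops' and adding a pointer to it before the terminating
 * NULL above. */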
static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
    = OVS_MUTEX_INITIALIZER;

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mp_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'concurrent_txq'). */
    int map;                       /* Mapping of configured vhost-user queues
                                    * to the queues enabled by the guest. */
};
/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;           /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    rte_spinlock_t policer_lock;
};
enum dpdk_hw_ol_features {
    NETDEV_RX_CHECKSUM_OFFLOAD = 1 << 0,
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio identifier for vhost devices */
    ovsrcu_index vid;

    /* True if vHost device is 'up' and has been reconfigured at least once */
    bool vhost_reconfigured;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    /* Device arguments for dpdk ports */
    char *devargs;

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    OVSRCU_TYPE(struct qos_conf *) qos_conf;

    /* The following properties cannot be changed when a device is running,
     * so we remember the request and update them next time
     * netdev_dpdk*_reconfigure() is called */
    int requested_mtu;
    int requested_n_txq;
    int requested_n_rxq;
    int requested_rxq_size;
    int requested_txq_size;

    /* Number of rx/tx descriptors for physical devices */
    int rxq_size;
    int txq_size;

    /* Socket ID detected when vHost device is brought up */
    int requested_socket_id;

    /* Denotes whether vHost port is client/server mode */
    uint64_t vhost_driver_flags;

    /* Ingress Policer */
    OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
    uint32_t policer_rate;
    uint32_t policer_burst;

    /* DPDK-ETH Flow control */
    struct rte_eth_fc_conf fc_conf;

    /* DPDK-ETH hardware offload features,
     * from the enum set 'dpdk_hw_ol_features' */
    uint32_t hw_ol_features;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static int netdev_dpdk_class_init(void);
static int netdev_dpdk_vhost_class_init(void);

int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);
static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->init == netdev_dpdk_class_init
           || class->init == netdev_dpdk_vhost_class_init;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less.  If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety.  Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames).  If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance.  To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                    NETDEV_DPDK_MBUF_ALIGN);
}
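/* Worked example: for mtu = 1500 and DPDK's default RTE_PKTMBUF_HEADROOM of
 * 128 bytes, the sum is 1526 + 128 = 1654, which ROUND_UP() pads to the next
 * NETDEV_DPDK_MBUF_ALIGN (1024-byte) boundary, i.e. a 2048-byte buffer. */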
/* Allocates an area of 'sz' bytes from DPDK.  The memory is zero'ed.
 *
 * Unlike xmalloc(), this function can return NULL on failure. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    return rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
}
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_p,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *pkt = _p;

    rte_pktmbuf_init(mp, opaque_arg, _p, i);

    dp_packet_init_dpdk((struct dp_packet *) pkt, pkt->buf_len);
}
static struct dpdk_mp *
dpdk_mp_create(int socket_id, int mtu)
{
    struct rte_pktmbuf_pool_private mbp_priv;
    struct dpdk_mp *dmp;
    unsigned mp_size;
    char *mp_name;

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    if (!dmp) {
        return NULL;
    }
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);
    /* XXX: this is a really rough method of provisioning memory.
     * It's impossible to determine what the exact memory requirements are
     * when the number of ports and rxqs that utilize a particular mempool can
     * change dynamically at runtime.  For now, use this rough heuristic.
     */
    if (mtu >= ETHER_MTU) {
        mp_size = MAX_NB_MBUF;
    } else {
        mp_size = MIN_NB_MBUF;
    }

    do {
        mp_name = xasprintf("ovs_mp_%d_%d_%u", dmp->mtu, dmp->socket_id,
                            mp_size);

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
        if (dmp->mp) {
            VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
                     mp_name, mp_size);
        }
        free(mp_name);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp) {
        return dmp;
    }

    rte_free(dmp);
    return NULL;
}
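/* The mempool name encodes MTU, NUMA socket and pool size, e.g.
 * "ovs_mp_1500_0_262144" for a full-sized pool on socket 0, so a retry with
 * a halved mp_size never collides with an existing pool's name. */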
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu)
{
    struct dpdk_mp *dmp;

    ovs_mutex_lock(&dpdk_mp_mutex);
    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            goto out;
        }
    }

    dmp = dpdk_mp_create(socket_id, mtu);
    if (dmp) {
        ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
    }

out:
    ovs_mutex_unlock(&dpdk_mp_mutex);

    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    ovs_mutex_lock(&dpdk_mp_mutex);
    ovs_assert(dmp->refcount);

    if (!--dmp->refcount) {
        ovs_list_remove(&dmp->list_node);
        rte_mempool_free(dmp->mp);
        rte_free(dmp);
    }
    ovs_mutex_unlock(&dpdk_mp_mutex);
}
/* Tries to allocate new mempool on requested_socket_id with
 * mbuf size corresponding to requested_mtu.
 * On success new configuration will be applied.
 * On error, device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
    struct dpdk_mp *mp;

    mp = dpdk_mp_get(dev->requested_socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!mp) {
        VLOG_ERR("Insufficient memory to create memory pool for netdev "
                 "%s, with MTU %d on socket %d\n",
                 dev->up.name, dev->requested_mtu, dev->requested_socket_id);
        return ENOMEM;
    } else {
        dpdk_mp_put(dev->dpdk_mp);
        dev->dpdk_mp = mp;
        dev->mtu = dev->requested_mtu;
        dev->socket_id = dev->requested_socket_id;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    }

    return 0;
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;
    struct rte_eth_conf conf = port_conf;

    if (dev->mtu > ETHER_MTU) {
        conf.rxmode.jumbo_frame = 1;
        conf.rxmode.max_rx_pkt_len = dev->max_packet_len;
    } else {
        conf.rxmode.jumbo_frame = 0;
        conf.rxmode.max_rx_pkt_len = 0;
    }
    conf.rxmode.hw_ip_checksum = (dev->hw_ol_features &
                                  NETDEV_RX_CHECKSUM_OFFLOAD) != 0;
    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV):  rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request fewer queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
        if (diag) {
            VLOG_WARN("Interface %s eth_dev setup error %s\n",
                      dev->up.name, rte_strerror(-diag));
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, dev->txq_size,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, dev->rxq_size,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag;
}
static void
dpdk_eth_checksum_offload_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_eth_dev_info info;
    bool rx_csum_ol_flag = false;
    uint32_t rx_chksm_offload_capa = DEV_RX_OFFLOAD_UDP_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_CKSUM |
                                     DEV_RX_OFFLOAD_IPV4_CKSUM;
    rte_eth_dev_info_get(dev->port_id, &info);
    rx_csum_ol_flag = (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) != 0;

    if (rx_csum_ol_flag &&
        (info.rx_offload_capa & rx_chksm_offload_capa) !=
         rx_chksm_offload_capa) {
        VLOG_WARN_ONCE("Rx checksum offload is not supported on device %d",
                       dev->port_id);
        dev->hw_ol_features &= ~NETDEV_RX_CHECKSUM_OFFLOAD;
        return;
    }
    netdev_request_reconfigure(&dev->up);
}
static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
    if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
        VLOG_WARN("Failed to enable flow control on device %d", dev->port_id);
    }
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    /* Get the Flow control configuration for DPDK-ETH */
    diag = rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
    if (diag) {
        VLOG_DBG("cannot get flow control parameters on port=%d, err=%d",
                 dev->port_id, diag);
    }

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}
static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    dev = dpdk_rte_mzalloc(sizeof *dev);
    if (dev) {
        return &dev->up;
    }

    return NULL;
}
static struct dpdk_tx_queue *
netdev_dpdk_alloc_txq(unsigned int n_txqs)
{
    struct dpdk_tx_queue *txqs;
    unsigned int i;

    txqs = dpdk_rte_mzalloc(n_txqs * sizeof *txqs);
    if (txqs) {
        for (i = 0; i < n_txqs; i++) {
            /* Initialize map for vhost devices. */
            txqs[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
            rte_spinlock_init(&txqs[i].tx_lock);
        }
    }

    return txqs;
}
static int
netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int sid;
    int err = 0;

    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH && rte_eth_dev_is_valid_port(dev->port_id)) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;
    dev->flags = 0;
    dev->requested_mtu = dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    ovsrcu_index_init(&dev->vid, -1);
    dev->vhost_reconfigured = false;

    err = netdev_dpdk_mempool_configure(dev);
    if (err) {
        goto unlock;
    }

    ovsrcu_init(&dev->qos_conf, NULL);

    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_rxq = NR_QUEUE;
    netdev->n_txq = NR_QUEUE;
    dev->requested_n_rxq = netdev->n_rxq;
    dev->requested_n_txq = netdev->n_txq;
    dev->rxq_size = NIC_PORT_DEFAULT_RXQ_SIZE;
    dev->txq_size = NIC_PORT_DEFAULT_TXQ_SIZE;
    dev->requested_rxq_size = dev->rxq_size;
    dev->requested_txq_size = dev->txq_size;

    /* Initialize the flow control to NULL */
    memset(&dev->fc_conf, 0, sizeof dev->fc_conf);

    /* Initialize the hardware offload flags to 0 */
    dev->hw_ol_features = 0;

    if (type == DPDK_DEV_ETH) {
        if (rte_eth_dev_is_valid_port(dev->port_id)) {
            err = dpdk_eth_dev_init(dev);
            if (err) {
                goto unlock;
            }
        }
        dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq);
    } else {
        dev->tx_q = netdev_dpdk_alloc_txq(OVS_VHOST_MAX_QUEUE_NUM);
        /* Enable DPDK_DEV_VHOST device and set promiscuous mode flag. */
        dev->flags = NETDEV_UP | NETDEV_PROMISC;
    }

    if (!dev->tx_q) {
        err = ENOMEM;
        goto unlock;
    }

    ovs_list_push_back(&dpdk_list, &dev->list_node);

unlock:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
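/* For example, dpdk_dev_parse_name("dpdkr7", "dpdkr", &port_no) succeeds
 * and sets '*port_no' to 7, whereas "dpdkr-1" (leading sign) and "ring7"
 * (wrong prefix) both yield ENODEV. */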
static int
netdev_dpdk_vhost_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system.  '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket.
     */
    snprintf(dev->vhost_id, sizeof dev->vhost_id, "%s/%s",
             dpdk_get_vhost_sock_dir(), name);

    dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;
    err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 dev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
    }
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_client_construct(struct netdev *netdev)
{
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
*netdev
)
1005 struct netdev_dpdk
*dev
= netdev_dpdk_cast(netdev
);
1007 ovs_mutex_lock(&dpdk_mutex
);
1008 ovs_mutex_lock(&dev
->mutex
);
1010 rte_eth_dev_stop(dev
->port_id
);
1012 free(ovsrcu_get_protected(struct ingress_policer
*,
1013 &dev
->ingress_policer
));
1015 rte_free(dev
->tx_q
);
1016 ovs_list_remove(&dev
->list_node
);
1017 dpdk_mp_put(dev
->dpdk_mp
);
1019 ovs_mutex_unlock(&dev
->mutex
);
1020 ovs_mutex_unlock(&dpdk_mutex
);
/* rte_vhost_driver_unregister() can call back destroy_device(), which will
 * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'.  To avoid a
 * deadlock, none of the mutexes must be held while calling this function. */
static int
dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
                             char *vhost_id)
    OVS_EXCLUDED(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    return rte_vhost_driver_unregister(vhost_id);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    char *vhost_id;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_vid(dev) >= 0
        && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.", dev->vhost_id);
    }

    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    vhost_id = xstrdup(dev->vhost_id);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    if (!vhost_id[0]) {
        goto out;
    }

    if (dpdk_vhost_driver_unregister(dev, vhost_id)) {
        VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n",
                 netdev->name, vhost_id);
    } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        /* OVS server mode - remove this socket from list for deletion */
        fatal_signal_remove_file_to_unlink(vhost_id);
    }
out:
    free(vhost_id);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "mtu", "%d", dev->mtu);

    if (dev->type == DPDK_DEV_ETH) {
        smap_add_format(args, "requested_rxq_descriptors", "%d",
                        dev->requested_rxq_size);
        smap_add_format(args, "configured_rxq_descriptors", "%d",
                        dev->rxq_size);
        smap_add_format(args, "requested_txq_descriptors", "%d",
                        dev->requested_txq_size);
        smap_add_format(args, "configured_txq_descriptors", "%d",
                        dev->txq_size);
        if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) {
            smap_add(args, "rx_csum_offload", "true");
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
*
1120 netdev_dpdk_lookup_by_port_id(int port_id
)
1121 OVS_REQUIRES(dpdk_mutex
)
1123 struct netdev_dpdk
*dev
;
1125 LIST_FOR_EACH (dev
, list_node
, &dpdk_list
) {
1126 if (dev
->port_id
== port_id
) {
static int
netdev_dpdk_process_devargs(const char *devargs)
{
    uint8_t new_port_id = UINT8_MAX;

    if (!rte_eth_dev_count()
        || rte_eth_dev_get_port_by_name(devargs, &new_port_id)
        || !rte_eth_dev_is_valid_port(new_port_id)) {
        /* Device not found in DPDK, attempt to attach it */
        if (!rte_eth_dev_attach(devargs, &new_port_id)) {
            /* Attach successful */
            VLOG_INFO("Device '%s' attached to DPDK", devargs);
        } else {
            /* Attach unsuccessful */
            VLOG_INFO("Error attaching device '%s' to DPDK", devargs);
            new_port_id = UINT8_MAX;
        }
    }

    return new_port_id;
}
static void
dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args)
    OVS_REQUIRES(dev->mutex)
{
    int new_n_rxq;

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(&dev->up);
    }
}
static void
dpdk_process_queue_size(struct netdev *netdev, const struct smap *args,
                        const char *flag, int default_size, int *new_size)
{
    int queue_size = smap_get_int(args, flag, default_size);

    if (queue_size <= 0 || queue_size > NIC_PORT_MAX_Q_SIZE
            || !is_pow2(queue_size)) {
        queue_size = default_size;
    }

    if (queue_size != *new_size) {
        *new_size = queue_size;
        netdev_request_reconfigure(netdev);
    }
}
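/* For example, with flag "n_rxq_desc" a request of 1024 is accepted as-is,
 * while 0, 1000 (not a power of two) and 8192 (above NIC_PORT_MAX_Q_SIZE)
 * all fall back to the 2048 default. */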
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    bool rx_fc_en, tx_fc_en, autoneg;
    bool rx_chksm_ofld, temp_flag;
    enum rte_eth_fc_mode fc_mode;
    static const enum rte_eth_fc_mode fc_mode_set[2][2] = {
        {RTE_FC_NONE,     RTE_FC_TX_PAUSE},
        {RTE_FC_RX_PAUSE, RTE_FC_FULL    }
    };
    const char *new_devargs;
    int err = 0;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    dpdk_set_rxq_config(dev, args);

    dpdk_process_queue_size(netdev, args, "n_rxq_desc",
                            NIC_PORT_DEFAULT_RXQ_SIZE,
                            &dev->requested_rxq_size);
    dpdk_process_queue_size(netdev, args, "n_txq_desc",
                            NIC_PORT_DEFAULT_TXQ_SIZE,
                            &dev->requested_txq_size);

    new_devargs = smap_get(args, "dpdk-devargs");

    if (dev->devargs && new_devargs && strcmp(new_devargs, dev->devargs)) {
        /* The user requested a new device.  If we return error, the caller
         * will delete this netdev and try to recreate it. */
        err = EAGAIN;
        goto out;
    }

    /* dpdk-devargs is required for device configuration */
    if (new_devargs && new_devargs[0]) {
        /* Don't process dpdk-devargs if value is unchanged and port id
         * is valid */
        if (!(dev->devargs && !strcmp(dev->devargs, new_devargs)
              && rte_eth_dev_is_valid_port(dev->port_id))) {
            int new_port_id = netdev_dpdk_process_devargs(new_devargs);
            if (!rte_eth_dev_is_valid_port(new_port_id)) {
                err = EINVAL;
            } else if (new_port_id == dev->port_id) {
                /* Already configured, do not reconfigure again */
                err = 0;
            } else {
                struct netdev_dpdk *dup_dev;

                dup_dev = netdev_dpdk_lookup_by_port_id(new_port_id);
                if (dup_dev) {
                    VLOG_WARN("'%s' is trying to use device '%s' which is "
                              "already in use by '%s'.",
                              netdev_get_name(netdev), new_devargs,
                              netdev_get_name(&dup_dev->up));
                    err = EADDRINUSE;
                } else {
                    free(dev->devargs);
                    dev->devargs = xstrdup(new_devargs);
                    dev->port_id = new_port_id;
                    netdev_request_reconfigure(&dev->up);
                    err = 0;
                }
            }
        }
    } else {
        /* dpdk-devargs unspecified - can't configure device */
        err = EINVAL;
    }

    if (err) {
        goto out;
    }

    rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
    tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
    autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);

    fc_mode = fc_mode_set[tx_fc_en][rx_fc_en];
    if (dev->fc_conf.mode != fc_mode || autoneg != dev->fc_conf.autoneg) {
        dev->fc_conf.mode = fc_mode;
        dev->fc_conf.autoneg = autoneg;
        dpdk_eth_flow_ctrl_setup(dev);
    }

    /* Rx checksum offload configuration */
    /* By default the Rx checksum offload is ON */
    rx_chksm_ofld = smap_get_bool(args, "rx-checksum-offload", true);
    temp_flag = (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) != 0;
    if (temp_flag != rx_chksm_ofld) {
        dev->hw_ol_features ^= NETDEV_RX_CHECKSUM_OFFLOAD;
        dpdk_eth_checksum_offload_configure(dev);
    }

out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_set_rxq_config(dev, args);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_client_set_config(struct netdev *netdev,
                                    const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *path;

    ovs_mutex_lock(&dev->mutex);
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        path = smap_get(args, "vhost-server-path");
        if (path && strcmp(path, dev->vhost_id)) {
            strcpy(dev->vhost_id, path);
            netdev_request_reconfigure(netdev);
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}
/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    if (rx) {
        return &rx->up;
    }

    return NULL;
}
static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
*rxq OVS_UNUSED
)
static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
/* Tries to transmit 'pkts' to txq 'qid' of device 'dev'.  Takes ownership of
 * 'pkts', even in case of failure.
 *
 * Returns the number of packets that weren't transmitted. */
static inline int
netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
                         struct rte_mbuf **pkts, int cnt)
{
    uint32_t nb_tx = 0;

    while (nb_tx != cnt) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, cnt - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != cnt)) {
        /* Free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < cnt; i++) {
            rte_pktmbuf_free(pkts[i]);
        }
    }

    return cnt - nb_tx;
}
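/* Example: for cnt = 32 the NIC may accept 20 mbufs on the first burst and
 * 12 on the second, so the loop above returns 0 dropped; but if a burst
 * accepts nothing, the loop stops, the untransmitted mbufs are freed and
 * their count is returned. */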
static inline bool
netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
                               struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}
static int
netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
                        struct rte_mbuf **pkts, int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet */
        if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}
static int
ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
                    int pkt_cnt)
{
    int cnt = 0;

    rte_spinlock_lock(&policer->policer_lock);
    cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts, pkt_cnt);
    rte_spinlock_unlock(&policer->policer_lock);

    return cnt;
}
static bool
is_vhost_running(struct netdev_dpdk *dev)
{
    return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
}
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count,
                                     int dropped)
{
    int i;
    unsigned int packet_size;
    struct dp_packet *packet;

    stats->rx_packets += count;
    stats->rx_dropped += dropped;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet_batch *batch)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    int qid = rxq->queue_id;
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t nb_rx = 0;
    uint16_t dropped = 0;

    if (OVS_UNLIKELY(!is_vhost_running(dev)
                     || !(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(netdev_dpdk_get_vid(dev),
                                    qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) batch->packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
                                         nb_rx, dropped);
    rte_spinlock_unlock(&dev->stats_lock);

    batch->count = (int) nb_rx;
    return 0;
}
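/* Virtio pairs queues per direction, so OVS rx queue 'qid' dequeues from
 * guest virtqueue qid * VIRTIO_QNUM + VIRTIO_TXQ: with VIRTIO_QNUM == 2 and
 * VIRTIO_TXQ == 1, rx queue 0 reads virtqueue 1, rx queue 1 reads
 * virtqueue 3, and so on. */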
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    int nb_rx;
    int dropped = 0;

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) batch->packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    /* Update stats to reflect dropped packets */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    batch->count = nb_rx;

    return 0;
}
static inline int
netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                    int cnt)
{
    struct qos_conf *qos_conf = ovsrcu_get(struct qos_conf *, &dev->qos_conf);

    if (qos_conf) {
        rte_spinlock_lock(&qos_conf->lock);
        cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt);
        rte_spinlock_unlock(&qos_conf->lock);
    }

    return cnt;
}
static int
netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                              int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt;

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        if (OVS_UNLIKELY(pkt->pkt_len > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " max_packet_len %d",
                         dev->up.name, pkt->pkt_len, dev->max_packet_len);
            rte_pktmbuf_free(pkt);
            continue;
        }

        if (OVS_UNLIKELY(i != cnt)) {
            pkts[cnt] = pkt;
        }
        cnt++;
    }

    return cnt;
}
static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int dropped = 0;
    int i, retries = 0;

    qid = dev->tx_q[qid % netdev->n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(dev) || qid < 0
                     || !(dev->flags & NETDEV_UP))) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    /* Check if QoS has been configured for the netdev. */
    cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt);
    dropped = total_pkts - cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(netdev_dpdk_get_vid(dev),
                                          vhost_qid, cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent. */
            cnt -= tx_pkts;
            /* Prepare for possible retry. */
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            /* No packets sent - do not retry. */
            break;
        }
    } while (cnt && (retries++ <= VHOST_ENQ_RETRY_NUM));

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
                                         cnt + dropped);
    rte_spinlock_unlock(&dev->stats_lock);

out:
    for (i = 0; i < total_pkts - dropped; i++) {
        dp_packet_delete(pkts[i]);
    }
}
/* Tx function.  Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = batch->count;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    dp_packet_batch_apply_cutlen(batch);

    for (i = 0; i < batch->count; i++) {
        int size = dp_packet_size(batch->packets[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);
            dropped++;
            continue;
        }

        pkts[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!pkts[newcnt]) {
            dropped += batch->count - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(pkts[newcnt], void *),
               dp_packet_data(batch->packets[i]), size);

        rte_pktmbuf_data_len(pkts[newcnt]) = size;
        rte_pktmbuf_pkt_len(pkts[newcnt]) = size;

        newcnt++;
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) pkts,
                                 newcnt);
    } else {
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run(dev, pkts, newcnt);

        dropped += qos_pkts - newcnt;
        dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, newcnt);
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }
}
*netdev
, int qid
,
1805 struct dp_packet_batch
*batch
,
1806 bool may_steal
, bool concurrent_txq OVS_UNUSED
)
1809 if (OVS_UNLIKELY(!may_steal
|| batch
->packets
[0]->source
!= DPBUF_DPDK
)) {
1810 dpdk_do_tx_copy(netdev
, qid
, batch
);
1811 dp_packet_delete_batch(batch
, may_steal
);
1813 dp_packet_batch_apply_cutlen(batch
);
1814 __netdev_dpdk_vhost_send(netdev
, qid
, batch
->packets
, batch
->count
);
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet_batch *batch, bool may_steal,
                   bool concurrent_txq)
{
    if (OVS_UNLIKELY(concurrent_txq)) {
        qid = qid % dev->up.n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     batch->packets[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        int dropped;
        int cnt = batch->count;
        struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;

        dp_packet_batch_apply_cutlen(batch);

        cnt = netdev_dpdk_filter_packet_len(dev, pkts, cnt);
        cnt = netdev_dpdk_qos_run(dev, pkts, cnt);
        dropped = batch->count - cnt;

        dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, cnt);

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(concurrent_txq)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet_batch *batch, bool may_steal,
                     bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    if (MTU_TO_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
        || mtu < ETHER_MIN_MTU) {
        VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu);
        return EINVAL;
    }

    ovs_mutex_lock(&dev->mutex);
    if (dev->requested_mtu != mtu) {
        dev->requested_mtu = mtu;
        netdev_request_reconfigure(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstat *xstats,
                           const struct rte_eth_xstat_name *names,
                           const unsigned int size)
{
    for (unsigned int i = 0; i < size; i++) {
        if (strcmp(XSTAT_RX_64_PACKETS, names[i].name) == 0) {
            stats->rx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->rx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->rx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->rx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->rx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->rx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->rx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_64_PACKETS, names[i].name) == 0) {
            stats->tx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->tx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->tx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->tx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->tx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->tx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->tx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, names[i].name) == 0) {
            stats->tx_multicast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->rx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->tx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, names[i].name) == 0) {
            stats->rx_undersized_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, names[i].name) == 0) {
            stats->rx_fragmented_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, names[i].name) == 0) {
            stats->rx_jabber_errors = xstats[i].value;
        }
    }
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstat *rte_xstats = NULL;
    struct rte_eth_xstat_name *rte_xstats_names = NULL;
    int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    /* Get length of statistics */
    rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
    if (rte_xstats_len < 0) {
        VLOG_WARN("Cannot get XSTATS values for port: %i", dev->port_id);
        goto out;
    }

    /* Reserve memory for xstats names and values */
    rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
    rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);

    /* Retrieve xstats names */
    rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
                                                  rte_xstats_names,
                                                  rte_xstats_len);
    if (rte_xstats_new_len != rte_xstats_len) {
        VLOG_WARN("Cannot get XSTATS names for port: %i.", dev->port_id);
        goto out;
    }

    /* Retrieve xstats values */
    memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
    rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                        rte_xstats_len);
    if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
        netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
                                   rte_xstats_len);
    } else {
        VLOG_WARN("Cannot get XSTATS values for port: %i.", dev->port_id);
    }

out:
    free(rte_xstats);
    free(rte_xstats_names);

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised,
                         enum netdev_features *supported,
                         enum netdev_features *peer)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
    }

    *advertised = *supported = *peer = 0;

    return 0;
}
*
2145 netdev_dpdk_policer_construct(uint32_t rate
, uint32_t burst
)
2147 struct ingress_policer
*policer
= NULL
;
2148 uint64_t rate_bytes
;
2149 uint64_t burst_bytes
;
2152 policer
= xmalloc(sizeof *policer
);
2153 rte_spinlock_init(&policer
->policer_lock
);
2155 /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
2156 rate_bytes
= rate
* 1000/8;
2157 burst_bytes
= burst
* 1000/8;
2159 policer
->app_srtcm_params
.cir
= rate_bytes
;
2160 policer
->app_srtcm_params
.cbs
= burst_bytes
;
2161 policer
->app_srtcm_params
.ebs
= 0;
2162 err
= rte_meter_srtcm_config(&policer
->in_policer
,
2163 &policer
->app_srtcm_params
);
2165 VLOG_ERR("Could not create rte meter for ingress policer");
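/* Unit check for the conversion above: a 10,000 kbit/s rate becomes
 * cir = 10000 * 1000 / 8 = 1,250,000 bytes/s, and an 8,000 kbit burst
 * becomes a 1,000,000-byte committed burst size. */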
static int
netdev_dpdk_set_policing(struct netdev *netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;

    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value. */
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
                     : policer_burst);

    ovs_mutex_lock(&dev->mutex);

    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);

    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    }

    /* Destroy any existing ingress policer for the device if one exists. */
    if (policer) {
        ovsrcu_postpone(free, policer);
    }

    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    } else {
        policer = NULL;
    }
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
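
/* Illustrative configuration path for the ingress policer above; interface
 * name and values are examples only:
 *     ovs-vsctl set interface dpdk0 ingress_policing_rate=10000 \
 *                                   ingress_policing_burst=1000
 * arrives here as policer_rate=10000 and policer_burst=1000 (kbits). */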
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }

        netdev_change_seq_changed(&dev->up);
    } else {
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
         * update. */
        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(dev)) {
            netdev_change_seq_changed(&dev->up);
        }

        /* Clear statistics if device is getting up. */
        if (NETDEV_UP & on) {
            rte_spinlock_lock(&dev->stats_lock);
            memset(&dev->stats, 0, sizeof dev->stats);
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);

        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
static void
netdev_dpdk_detach(struct unixctl_conn *conn, int argc OVS_UNUSED,
                   const char *argv[], void *aux OVS_UNUSED)
{
    int ret;
    char *response;
    uint8_t port_id;
    char devname[RTE_ETH_NAME_MAX_LEN];
    struct netdev_dpdk *dev;

    ovs_mutex_lock(&dpdk_mutex);

    if (!rte_eth_dev_count() || rte_eth_dev_get_port_by_name(argv[1],
                                                             &port_id)) {
        response = xasprintf("Device '%s' not found in DPDK", argv[1]);
        goto error;
    }

    dev = netdev_dpdk_lookup_by_port_id(port_id);
    if (dev) {
        response = xasprintf("Device '%s' is being used by interface '%s'. "
                             "Remove it before detaching",
                             argv[1], netdev_get_name(&dev->up));
        goto error;
    }

    rte_eth_dev_close(port_id);

    ret = rte_eth_dev_detach(port_id, devname);
    if (ret < 0) {
        response = xasprintf("Device '%s' can not be detached", argv[1]);
        goto error;
    }

    response = xasprintf("Device '%s' has been detached", argv[1]);

    ovs_mutex_unlock(&dpdk_mutex);
    unixctl_command_reply(conn, response);
    free(response);
    return;

error:
    ovs_mutex_unlock(&dpdk_mutex);
    unixctl_command_reply_error(conn, response);
    free(response);
}
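
/* Example invocation of the command above (the PCI address is illustrative):
 *     ovs-appctl netdev-dpdk/detach 0000:01:00.0 */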
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(int vid)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < rte_vhost_get_queue_num(vid); i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_TXQ, 0);
    }
}
/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->up.n_txq;

    enabled_queues = xcalloc(total_txqs, sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    }

    free(enabled_queues);
}
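
/* Worked example (illustrative): with dev->up.n_txq == 4 and only queues
 * 0 and 2 enabled, enabled_queues is {0, 2} and the disabled queues are
 * remapped round-robin onto the enabled ones:
 *      0 --> 0,  1 --> 0,  2 --> 2,  3 --> 2 */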
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int newnode = 0;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            uint32_t qp_num = rte_vhost_get_queue_num(vid);

            /* Get NUMA information. */
            newnode = rte_vhost_get_numa_node(vid);
            if (newnode == -1) {
                VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
                          ifname);
                newnode = dev->socket_id;
            }

            if (dev->requested_n_txq != qp_num
                || dev->requested_n_rxq != qp_num
                || dev->requested_socket_id != newnode) {
                dev->requested_socket_id = newnode;
                dev->requested_n_rxq = qp_num;
                dev->requested_n_txq = qp_num;
                netdev_request_reconfigure(&dev->up);
            } else {
                /* Reconfiguration not required. */
                dev->vhost_reconfigured = true;
            }

            ovsrcu_index_set(&dev->vid, vid);
            exists = true;

            /* Disable notifications. */
            set_irq_status(vid);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' has been added on numa node %i",
              ifname, newnode);

    return 0;
}
/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->up.n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}
/*
 * Remove a virtio-net device from the specific vhost port. Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_vid(dev) == vid) {

            ovs_mutex_lock(&dev->mutex);
            dev->vhost_reconfigured = false;
            ovsrcu_index_set(&dev->vid, -1);
            netdev_dpdk_txq_map_clear(dev);

            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            exists = true;
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' has been removed", ifname);
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
    }
}
static int
vring_state_changed(int vid, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            if (enable) {
                dev->tx_q[qid].map = qid;
            } else {
                dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
            }
            netdev_dpdk_remap_txqs(dev);
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' "
                  "changed to \'%s\'", queue_id, qid, ifname,
                  (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
        return -1;
    }

    return 0;
}
int
netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
{
    return ovsrcu_index_get(&dev->vid);
}

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
}
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};

static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the vhost thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}
static int
netdev_dpdk_class_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    /* This function can be called for different classes.  The initialization
     * needs to be done only once. */
    if (ovsthread_once_start(&once)) {
        ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
        unixctl_command_register("netdev-dpdk/set-admin-state",
                                 "[netdev] up|down", 1, 2,
                                 netdev_dpdk_set_admin_state, NULL);
        unixctl_command_register("netdev-dpdk/detach",
                                 "pci address of device", 1, 1,
                                 netdev_dpdk_detach, NULL);

        ovsthread_once_done(&once);
    }

    return 0;
}
static int
netdev_dpdk_vhost_class_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    /* This function can be called for different classes.  The initialization
     * needs to be done only once. */
    if (ovsthread_once_start(&once)) {
        rte_vhost_driver_callback_register(&virtio_net_device_ops);
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
                                  | 1ULL << VIRTIO_NET_F_HOST_TSO6
                                  | 1ULL << VIRTIO_NET_F_CSUM
                                  | 1ULL << VIRTIO_RING_F_INDIRECT_DESC);
        ovs_thread_create("vhost_thread", start_vhost_loop, NULL);

        ovsthread_once_done(&once);
    }

    return 0;
}
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ring_pair;
    char *ring_name;
    int port_id;

    ring_pair = dpdk_rte_mzalloc(sizeof *ring_pair);
    if (!ring_pair) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    ring_name = xasprintf("%s_tx", dev_name);

    /* Create single producer tx ring, netdev does explicit locking. */
    ring_pair->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                          RING_F_SP_ENQ);
    free(ring_name);
    if (ring_pair->cring_tx == NULL) {
        rte_free(ring_pair);
        return ENOMEM;
    }

    ring_name = xasprintf("%s_rx", dev_name);

    /* Create single consumer rx ring, netdev does explicit locking. */
    ring_pair->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                          RING_F_SC_DEQ);
    free(ring_name);
    if (ring_pair->cring_rx == NULL) {
        rte_free(ring_pair);
        return ENOMEM;
    }

    port_id = rte_eth_from_rings(dev_name, &ring_pair->cring_rx, 1,
                                 &ring_pair->cring_tx, 1, SOCKET0);
    if (port_id < 0) {
        rte_free(ring_pair);
        return ENODEV;
    }

    ring_pair->user_port_id = port_no;
    ring_pair->eth_port_id = port_id;
    *eth_port_id = port_id;

    ovs_list_push_back(&dpdk_ring_list, &ring_pair->list_node);

    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ring_pair;
    unsigned int port_no;
    int err;

    /* Names always start with "dpdkr". */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ring_pair, list_node, &dpdk_ring_list) {
        if (ring_pair->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            /* Really all that is needed. */
            *eth_port_id = ring_pair->eth_port_id;
            return 0;
        }
    }
    /* Need to create the device rings. */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
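
/* Illustrative use of a ring device; names follow the "dpdkr<N>" pattern
 * parsed above (bridge and port names are examples only):
 *     ovs-vsctl add-port br0 dpdkr0 -- set Interface dpdkr0 type=dpdkr */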
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
                      struct dp_packet_batch *batch, bool may_steal,
                      bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and returned to the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < batch->count; i++) {
        dp_packet_rss_invalidate(batch->packets[i]);
    }

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
    rte_spinlock_init(&conf->lock);
}
/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations qos_name to name.  Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match is found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }
    return NULL;
}
static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }

    return 0;
}
static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf) {
        *typep = qos_conf->ops->qos_name;
        error = (qos_conf->ops->qos_get
                 ? qos_conf->ops->qos_get(qos_conf, details) : 0);
    } else {
        /* No QoS configuration set, return an empty string. */
        *typep = "";
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_set_qos(struct netdev *netdev, const char *type,
                    const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    struct qos_conf *qos_conf, *new_qos_conf = NULL;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);

    new_ops = qos_lookup_name(type);

    if (!new_ops || !new_ops->qos_construct) {
        new_qos_conf = NULL;
        if (type && type[0]) {
            error = EOPNOTSUPP;
        }
    } else if (qos_conf && qos_conf->ops == new_ops
               && qos_conf->ops->qos_is_equal(qos_conf, details)) {
        new_qos_conf = qos_conf;
    } else {
        error = new_ops->qos_construct(details, &new_qos_conf);
    }

    if (error) {
        VLOG_ERR("Failed to set QoS type %s on port %s: %s",
                 type, netdev->name, rte_strerror(error));
    }

    if (new_qos_conf != qos_conf) {
        ovsrcu_set(&dev->qos_conf, new_qos_conf);
        if (qos_conf) {
            ovsrcu_postpone(qos_conf->ops->qos_destruct, qos_conf);
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};
static void
egress_policer_details_to_param(const struct smap *details,
                                struct rte_meter_srtcm_params *params)
{
    memset(params, 0, sizeof *params);
    params->cir = smap_get_ullong(details, "cir", 0);
    params->cbs = smap_get_ullong(details, "cbs", 0);
    params->ebs = 0;
}
static int
egress_policer_qos_construct(const struct smap *details,
                             struct qos_conf **conf)
{
    struct egress_policer *policer;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    egress_policer_details_to_param(details, &policer->app_srtcm_params);
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    if (!err) {
        *conf = &policer->qos_conf;
    } else {
        free(policer);
        *conf = NULL;
        err = -err;
    }

    return err;
}
static void
egress_policer_qos_destruct(struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}
static int
egress_policer_qos_get(const struct qos_conf *conf, struct smap *details)
{
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);

    smap_add_format(details, "cir", "%"PRIu64, policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%"PRIu64, policer->app_srtcm_params.cbs);

    return 0;
}
static bool
egress_policer_qos_is_equal(const struct qos_conf *conf,
                            const struct smap *details)
{
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);
    struct rte_meter_srtcm_params params;

    egress_policer_details_to_param(details, &params);

    return !memcmp(&params, &policer->app_srtcm_params, sizeof params);
}
static int
egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt)
{
    int cnt = 0;
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);

    cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);

    return cnt;
}
static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_is_equal,
    egress_policer_run
};
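
/* Illustrative configuration of the egress policer via the database; the
 * port name and values are examples only.  "cir" is in bytes/s and "cbs"
 * in bytes, matching egress_policer_details_to_param() above:
 *     ovs-vsctl set port dpdk0 qos=@qos0 -- \
 *         --id=@qos0 create qos type=egress-policer \
 *             other_config:cir=46000000 other_config:cbs=2048 */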
static int
netdev_dpdk_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dev->mutex);

    if (netdev->n_txq == dev->requested_n_txq
        && netdev->n_rxq == dev->requested_n_rxq
        && dev->mtu == dev->requested_mtu
        && dev->rxq_size == dev->requested_rxq_size
        && dev->txq_size == dev->requested_txq_size) {
        /* Reconfiguration is unnecessary. */
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    if (dev->mtu != dev->requested_mtu) {
        netdev_dpdk_mempool_configure(dev);
    }

    netdev->n_txq = dev->requested_n_txq;
    netdev->n_rxq = dev->requested_n_rxq;

    dev->rxq_size = dev->requested_rxq_size;
    dev->txq_size = dev->requested_txq_size;

    rte_free(dev->tx_q);
    err = dpdk_eth_dev_init(dev);
    dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq);
    if (!dev->tx_q) {
        err = ENOMEM;
    }

    netdev_change_seq_changed(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
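
/* The requested_* values compared above are set from database changes,
 * e.g. (illustrative names and values):
 *     ovs-vsctl set Interface dpdk0 mtu_request=9000 options:n_rxq=2 */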
static void
dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    dev->up.n_txq = dev->requested_n_txq;
    dev->up.n_rxq = dev->requested_n_rxq;

    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    netdev_dpdk_remap_txqs(dev);

    if (dev->requested_socket_id != dev->socket_id
        || dev->requested_mtu != dev->mtu) {
        if (!netdev_dpdk_mempool_configure(dev)) {
            netdev_change_seq_changed(&dev->up);
        }
    }

    if (netdev_dpdk_get_vid(dev) >= 0) {
        dev->vhost_reconfigured = true;
    }
}
static int
netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_vhost_reconfigure_helper(dev);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;

    ovs_mutex_lock(&dev->mutex);

    dpdk_vhost_reconfigure_helper(dev);

    /* Configure vHost client mode if requested and if the following criteria
     * are met:
     *  1. Device hasn't been registered yet.
     *  2. A path has been specified. */
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)
        && strlen(dev->vhost_id)) {
        /* Register client-mode device. */
        err = rte_vhost_driver_register(dev->vhost_id,
                                        RTE_VHOST_USER_CLIENT);
        if (err) {
            VLOG_ERR("vhost-user device setup failure for device %s\n",
                     dev->vhost_id);
        } else {
            /* Configuration successful. */
            dev->vhost_driver_flags |= RTE_VHOST_USER_CLIENT;
            VLOG_INFO("vHost User device '%s' created in 'client' mode, "
                      "using client socket '%s'",
                      dev->up.name, dev->vhost_id);
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
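
/* Illustrative creation of a vhost-user client port; the port name and
 * socket path are examples only:
 *     ovs-vsctl add-port br0 dpdkvhostclient0 -- \
 *         set Interface dpdkvhostclient0 type=dpdkvhostuserclient \
 *             options:vhost-server-path=/tmp/dpdkvhostclient0 */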
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT,    \
                          SET_CONFIG, SET_TX_MULTIQ, SEND,    \
                          GET_CARRIER, GET_STATS,             \
                          GET_FEATURES, GET_STATUS,           \
                          RECONFIGURE, RXQ_RECV)              \
{                                                             \
    NAME,                                                     \
    true,                       /* is_pmd */                  \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    SET_CONFIG,                                               \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    SET_TX_MULTIQ,                                            \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    netdev_dpdk_set_policing,                                 \
    netdev_dpdk_get_qos_types,                                \
    NULL,                       /* get_qos_capabilities */    \
    netdev_dpdk_get_qos,                                      \
    netdev_dpdk_set_qos,                                      \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_addr_list */           \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
    RECONFIGURE,                                              \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        netdev_dpdk_class_init,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        netdev_dpdk_class_init,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_ring_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        netdev_dpdk_vhost_class_init,
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        NULL,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_reconfigure,
        netdev_dpdk_vhost_rxq_recv);
static const struct netdev_class dpdk_vhost_client_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuserclient",
        netdev_dpdk_vhost_class_init,
        netdev_dpdk_vhost_client_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_client_set_config,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_client_reconfigure,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_ring_class);
    netdev_register_provider(&dpdk_vhost_class);
    netdev_register_provider(&dpdk_vhost_client_class);
}