/*
 * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "netdev-dpdk.h"

#include <linux/virtio_net.h>
#include <sys/socket.h>
/* Include rte_compat.h first to allow experimental API's needed for the
 * rte_meter.h rfc4115 functions. Once they are no longer marked as
 * experimental the #define and rte_compat.h include can be removed. */
#define ALLOW_EXPERIMENTAL_API
#include <rte_compat.h>
#include <rte_bus_pci.h>
#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_eth_ring.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_meter.h>
#include <rte_version.h>
#include <rte_vhost.h>
#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "if-notifier.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "userspace-tso.h"
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

COVERAGE_DEFINE(vhost_tx_contention);
COVERAGE_DEFINE(vhost_notification);
#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + RTE_ETHER_HDR_LEN \
                                     + RTE_ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len)                    \
                                     - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
#define NETDEV_DPDK_MBUF_ALIGN      1024
#define NETDEV_DPDK_MAX_PKT_LEN     9728
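/* Illustrative sanity checks (not in the original source): the frame/MTU
 * macros above are exact inverses for the plain Ethernet overhead, and the
 * 9728-byte device limit corresponds to an MTU of 9710. */
BUILD_ASSERT_DECL(FRAME_LEN_TO_MTU(MTU_TO_FRAME_LEN(1500)) == 1500);
BUILD_ASSERT_DECL(FRAME_LEN_TO_MTU(NETDEV_DPDK_MAX_PKT_LEN) == 9710);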
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
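/* Concretely (illustrative): MAX_NB_MBUF / MIN_NB_MBUF is 16, already a
 * power of two, so the checks above reduce to 262144 % 16 == 0 and
 * (262144 / 16) % MP_CACHE_SZ == 0 (DPDK defines RTE_MEMPOOL_CACHE_MAX_SIZE
 * as 512, and 16384 % 512 == 0). */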
/* Default size of Physical NIC RXQ */
#define NIC_PORT_DEFAULT_RXQ_SIZE 2048
/* Default size of Physical NIC TXQ */
#define NIC_PORT_DEFAULT_TXQ_SIZE 2048
/* Maximum size of Physical NIC Queues */
#define NIC_PORT_MAX_Q_SIZE 4096

#define OVS_VHOST_MAX_QUEUE_NUM 1024  /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */

#define DPDK_ETH_PORT_ID_INVALID    RTE_MAX_ETHPORTS
/* DPDK library uses uint16_t for port_id. */
typedef uint16_t dpdk_port_t;
#define DPDK_PORT_ID_FMT "%"PRIu16

/* Minimum amount of vhost tx retries, effectively a disable. */
#define VHOST_ENQ_RETRY_MIN 0
/* Maximum amount of vhost tx retries. */
#define VHOST_ENQ_RETRY_MAX 32
/* Legacy default value for vhost tx retries. */
#define VHOST_ENQ_RETRY_DEF 8

#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static int new_device(int vid);
static void destroy_device(int vid);
static int vring_state_changed(int vid, uint16_t queue_id, int enable);
static void destroy_connection(int vid);
static void vhost_guest_notified(int vid);
static const struct vhost_device_ops virtio_net_device_ops = {
    .new_device =  new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed,
    .features_changed = NULL,
    .new_connection = NULL,
    .destroy_connection = destroy_connection,
    .guest_notified = vhost_guest_notified,
};
/* Custom software stats for dpdk ports */
struct netdev_dpdk_sw_stats {
    /* No. of retries when unable to transmit. */
    uint64_t tx_retries;
    /* Packet drops when unable to transmit; probably the Tx queue is full. */
    uint64_t tx_failure_drops;
    /* Packet length greater than device MTU. */
    uint64_t tx_mtu_exceeded_drops;
    /* Packet drops in egress policer processing. */
    uint64_t tx_qos_drops;
    /* Packet drops in ingress policer processing. */
    uint64_t rx_qos_drops;
    /* Packet drops in HWOL processing. */
    uint64_t tx_invalid_hwol_drops;
};
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};
/* Quality of Service */

/* An instance of a QoS configuration. Always associated with a particular
 * netdev.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs.
 */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
    rte_spinlock_t lock;
};

/* QoS queue information used by the netdev queue dump functions. */
struct netdev_dpdk_queue_state {
    uint32_t *queues;
    size_t cur_queue;
    size_t n_queues;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted.
 */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct a qos_conf object. The implementation should make
     * the appropriate calls to configure QoS according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets '*conf' to an
     * initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_construct)(const struct smap *details, struct qos_conf **conf);

    /* Destroys the data structures allocated by the implementation as part of
     * 'conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    void (*qos_destruct)(struct qos_conf *conf);

    /* Retrieves details of 'conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     */
    int (*qos_get)(const struct qos_conf *conf, struct smap *details);

    /* Returns true if 'conf' is already configured according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * For all QoS implementations it should always be non-null.
     */
    bool (*qos_is_equal)(const struct qos_conf *conf,
                         const struct smap *details);

    /* Modify an array of rte_mbufs. The modification is specific to
     * each qos implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf **pkts,
                   int pkt_cnt, bool should_steal);

    /* Called to construct a QoS Queue. The implementation should make
     * the appropriate calls to configure QoS Queue according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it constructs
     * QoS queue successfully.
     */
    int (*qos_queue_construct)(const struct smap *details,
                               uint32_t queue_id, struct qos_conf *conf);

    /* Destroys the QoS Queue. */
    void (*qos_queue_destruct)(struct qos_conf *conf, uint32_t queue_id);

    /* Retrieves details of QoS Queue configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     */
    int (*qos_queue_get)(struct smap *details, uint32_t queue_id,
                         const struct qos_conf *conf);

    /* Retrieves statistics of QoS Queue configuration into 'stats'. */
    int (*qos_queue_get_stats)(const struct qos_conf *conf, uint32_t queue_id,
                               struct netdev_queue_stats *stats);

    /* Setup the 'netdev_dpdk_queue_state' structure used by the dpdk queue
     * dump functions.
     */
    int (*qos_queue_dump_state_init)(const struct qos_conf *conf,
                                     struct netdev_dpdk_queue_state *state);
};
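/* Illustrative sketch (not part of the original file): the smallest possible
 * dpdk_qos_ops implementation, a pass-through policer. The 'noop_*' names
 * are hypothetical; the required 'qos_is_equal' callback and 'ops'/'lock'
 * initialization are elided for brevity. See egress_policer_ops below for a
 * complete, real implementation. */
#ifdef NETDEV_DPDK_QOS_EXAMPLE
static int
noop_qos_construct(const struct smap *details OVS_UNUSED,
                   struct qos_conf **conf)
{
    /* Contract above: return 0 if and only if '*conf' points to an
     * initialized 'struct qos_conf'. */
    *conf = xzalloc(sizeof **conf);
    return 0;
}

static void
noop_qos_destruct(struct qos_conf *conf)
{
    free(conf);
}

static int
noop_qos_run(struct qos_conf *conf OVS_UNUSED,
             struct rte_mbuf **pkts OVS_UNUSED,
             int pkt_cnt, bool should_steal OVS_UNUSED)
{
    /* No policing: report every mbuf as still present in the array. */
    return pkt_cnt;
}

static const struct dpdk_qos_ops noop_qos_ops = {
    .qos_name = "noop",
    .qos_construct = noop_qos_construct,
    .qos_destruct = noop_qos_destruct,
    .qos_run = noop_qos_run,
};
#endif /* NETDEV_DPDK_QOS_EXAMPLE */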
/* dpdk_qos_ops for each type of user space QoS implementation. */
static const struct dpdk_qos_ops egress_policer_ops;
static const struct dpdk_qos_ops trtcm_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    &trtcm_policer_ops,
    NULL
};
static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
    = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_mp's. */
static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mp_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each netdev tx queue. */
struct dpdk_tx_queue {
    /* Padding to make dpdk_tx_queue exactly one cache line long. */
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* Protects the members and the NIC queue from concurrent access.
         * It is used only if the queue is shared among different pmd threads
         * (see 'concurrent_txq'). */
        rte_spinlock_t tx_lock;
        /* Mapping of configured vhost-user queue to enabled by guest. */
        int map;
    );
};
/* dpdk has no way to remove dpdk ring ethernet devices
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name. */
    dpdk_port_t eth_port_id; /* ethernet device port id. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    struct rte_meter_srtcm_profile in_prof;
    rte_spinlock_t policer_lock;
};
enum dpdk_hw_ol_features {
    NETDEV_RX_CHECKSUM_OFFLOAD = 1 << 0,
    NETDEV_RX_HW_CRC_STRIP = 1 << 1,
    NETDEV_RX_HW_SCATTER = 1 << 2,
    NETDEV_TX_TSO_OFFLOAD = 1 << 3,
};
/*
 * In order to avoid confusion in variable names, the following naming
 * convention should be used, if possible:
 *
 *     'struct netdev'          : 'netdev'
 *     'struct netdev_dpdk'     : 'dev'
 *     'struct netdev_rxq'      : 'rxq'
 *     'struct netdev_rxq_dpdk' : 'rx'
 *
 * Example:
 *     struct netdev *netdev = netdev_from_name(name);
 *     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
 *
 * Also, 'netdev' should be used instead of 'dev->up', where 'netdev' was
 * already defined.
 */
struct netdev_dpdk {
    PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline0,
        dpdk_port_t port_id;

        /* If true, device was attached by rte_eth_dev_attach(). */
        bool attached;
        /* If true, rte_eth_dev_start() was successfully called */
        bool started;
        bool reset_needed;
        /* 1 pad byte here. */
        struct eth_addr hwaddr;
        int mtu;
        int socket_id;
        int buf_size;
        int max_packet_len;
        enum dpdk_dev_type type;
        enum netdev_flags flags;
        int link_reset_cnt;

        /* Device arguments for dpdk ports. */
        char *devargs;

        /* Identifier used to distinguish vhost devices from each other. */
        char *vhost_id;

        struct dpdk_tx_queue *tx_q;
        struct rte_eth_link link;
    );

    PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline1,
        struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
        struct dpdk_mp *dpdk_mp;

        /* virtio identifier for vhost devices */
        ovsrcu_index vid;

        /* True if vHost device is 'up' and has been reconfigured at least once */
        bool vhost_reconfigured;

        atomic_uint8_t vhost_tx_retries_max;
        /* 2 pad bytes here. */
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct netdev up;
        /* In dpdk_list. */
        struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

        /* QoS configuration and lock for the device */
        OVSRCU_TYPE(struct qos_conf *) qos_conf;

        /* Ingress Policer */
        OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
        uint32_t policer_rate;
        uint32_t policer_burst;

        /* Array of vhost rxq states, see vring_state_changed. */
        bool *vhost_rxq_enabled;
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct netdev_stats stats;
        struct netdev_dpdk_sw_stats *sw_stats;
        /* Protects stats */
        rte_spinlock_t stats_lock;
        /* 36 pad bytes here. */
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* The following properties cannot be changed when a device is running,
         * so we remember the request and update them next time
         * netdev_dpdk*_reconfigure() is called */
        int requested_mtu;
        int requested_n_txq;
        int requested_n_rxq;
        int requested_rxq_size;
        int requested_txq_size;

        /* Number of rx/tx descriptors for physical devices */
        int rxq_size;
        int txq_size;

        /* Socket ID detected when vHost device is brought up */
        int requested_socket_id;

        /* Denotes whether vHost port is client/server mode */
        uint64_t vhost_driver_flags;

        /* DPDK-ETH Flow control */
        struct rte_eth_fc_conf fc_conf;

        /* DPDK-ETH hardware offload features,
         * from the enum set 'dpdk_hw_ol_features' */
        uint32_t hw_ol_features;

        /* Properties for link state change detection mode.
         * If lsc_interrupt_mode is set to false, poll mode is used,
         * otherwise interrupt mode is used. */
        bool requested_lsc_interrupt_mode;
        bool lsc_interrupt_mode;
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* Names of all XSTATS counters */
        struct rte_eth_xstat_name *rte_xstats_names;
        int rte_xstats_names_size;
        int rte_xstats_ids_size;
        uint64_t *rte_xstats_ids;
    );
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    dpdk_port_t port_id;
};
static void netdev_dpdk_destruct(struct netdev *netdev);
static void netdev_dpdk_vhost_destruct(struct netdev *netdev);

static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
                                           struct netdev_custom_stats *);
static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);

int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);
static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->destruct == netdev_dpdk_destruct
           || class->destruct == netdev_dpdk_vhost_destruct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP(MTU_TO_MAX_FRAME_LEN(mtu), NETDEV_DPDK_MBUF_ALIGN)
           + RTE_PKTMBUF_HEADROOM;
}
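/* Worked example (illustrative, assuming DPDK's default 128-byte
 * RTE_PKTMBUF_HEADROOM): for mtu = 1500, MTU_TO_MAX_FRAME_LEN(1500) is
 * 1500 + 26 = 1526, ROUND_UP(1526, 1024) is 2048, so dpdk_buf_size(1500)
 * returns 2048 + 128 = 2176 bytes. */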
/* Allocates an area of 'sz' bytes from DPDK. The memory is zero'ed.
 *
 * Unlike xmalloc(), this function can return NULL on failure. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    return rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
}
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp OVS_UNUSED,
                     void *opaque_arg OVS_UNUSED,
                     void *_p,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *pkt = _p;

    dp_packet_init_dpdk((struct dp_packet *) pkt);
}
static int
dpdk_mp_full(const struct rte_mempool *mp) OVS_REQUIRES(dpdk_mp_mutex)
{
    /* At this point we want to know if all the mbufs are back
     * in the mempool. rte_mempool_full() is not atomic but it's
     * the best available and as we are no longer requesting mbufs
     * from the mempool, it means mbufs will not move from
     * 'mempool ring' --> 'mempool cache'. In rte_mempool_full()
     * the ring is counted before caches, so we won't get false
     * positives in this use case and we handle false negatives.
     *
     * If future implementations of rte_mempool_full() were to change
     * it could be possible for a false positive. Even that would
     * likely be ok, as there are additional checks during mempool
     * freeing but it would make things racy.
     */
    return rte_mempool_full(mp);
}
/* Free unused mempools. */
static void
dpdk_mp_sweep(void) OVS_REQUIRES(dpdk_mp_mutex)
{
    struct dpdk_mp *dmp, *next;

    LIST_FOR_EACH_SAFE (dmp, next, list_node, &dpdk_mp_list) {
        if (!dmp->refcount && dpdk_mp_full(dmp->mp)) {
            VLOG_DBG("Freeing mempool \"%s\"", dmp->mp->name);
            ovs_list_remove(&dmp->list_node);
            rte_mempool_free(dmp->mp);
            rte_free(dmp);
        }
    }
}
/* Calculating the required number of mbufs differs depending on the
 * mempool model being used. Check if per port memory is in use before
 * calculating.
 */
static uint32_t
dpdk_calculate_mbufs(struct netdev_dpdk *dev, int mtu, bool per_port_mp)
{
    uint32_t n_mbufs;

    if (!per_port_mp) {
        /* Shared memory is being used.
         * XXX: this is a really rough method of provisioning memory.
         * It's impossible to determine what the exact memory requirements are
         * when the number of ports and rxqs that utilize a particular mempool
         * can change dynamically at runtime. For now, use this rough
         * heuristic.
         */
        if (mtu >= RTE_ETHER_MTU) {
            n_mbufs = MAX_NB_MBUF;
        } else {
            n_mbufs = MIN_NB_MBUF;
        }
    } else {
        /* Per port memory is being used.
         * XXX: rough estimation of number of mbufs required for this port:
         * <packets required to fill the device rxqs>
         * + <packets that could be stuck on other ports txqs>
         * + <packets in the pmd threads>
         * + <additional memory for corner cases>
         */
        n_mbufs = dev->requested_n_rxq * dev->requested_rxq_size
                  + dev->requested_n_txq * dev->requested_txq_size
                  + MIN(RTE_MAX_LCORE, dev->requested_n_rxq) * NETDEV_MAX_BURST
                  + MIN_NB_MBUF;
    }

    return n_mbufs;
}
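/* Worked example for the per-port estimate (illustrative): a port with two
 * 2048-descriptor rxqs and two 2048-descriptor txqs would request
 * 2 * 2048 + 2 * 2048 + 2 * 32 (NETDEV_MAX_BURST) + 16384 (MIN_NB_MBUF)
 * = 24640 mbufs. */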
static struct dpdk_mp *
dpdk_mp_create(struct netdev_dpdk *dev, int mtu, bool per_port_mp)
{
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    const char *netdev_name = netdev_get_name(&dev->up);
    int socket_id = dev->requested_socket_id;
    uint32_t n_mbufs = 0;
    uint32_t mbuf_size = 0;
    uint32_t aligned_mbuf_size = 0;
    uint32_t mbuf_priv_data_len = 0;
    uint32_t pkt_size = 0;
    uint32_t hash = hash_string(netdev_name, 0);
    struct dpdk_mp *dmp = NULL;
    int ret;

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    if (!dmp) {
        return NULL;
    }
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;

    /* Get the size of each mbuf, based on the MTU */
    mbuf_size = MTU_TO_FRAME_LEN(mtu);

    n_mbufs = dpdk_calculate_mbufs(dev, mtu, per_port_mp);

    do {
        /* Full DPDK memory pool name must be unique and cannot be
         * longer than RTE_MEMPOOL_NAMESIZE. Note that for the shared
         * mempool case this can result in one device using a mempool
         * which references a different device in its name. However as
         * mempool names are hashed, the device name will not be readable
         * so this is not an issue for tasks such as debugging.
         */
        ret = snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
                       "ovs%08x%02d%05d%07u",
                       hash, socket_id, mtu, n_mbufs);
        if (ret < 0 || ret >= RTE_MEMPOOL_NAMESIZE) {
            VLOG_DBG("snprintf returned %d. "
                     "Failed to generate a mempool name for \"%s\". "
                     "Hash:0x%x, socket_id: %d, mtu:%d, mbufs:%u.",
                     ret, netdev_name, hash, socket_id, mtu, n_mbufs);
            break;
        }

        VLOG_DBG("Port %s: Requesting a mempool of %u mbufs of size %u "
                 "on socket %d for %d Rx and %d Tx queues, "
                 "cache line size of %u",
                 netdev_name, n_mbufs, mbuf_size, socket_id,
                 dev->requested_n_rxq, dev->requested_n_txq,
                 RTE_CACHE_LINE_SIZE);

        /* The size of the mbuf's private area (i.e. area that holds OvS'
         * dp_packet data) */
        mbuf_priv_data_len = sizeof(struct dp_packet) -
                             sizeof(struct rte_mbuf);
        /* The size of the entire dp_packet. */
        pkt_size = sizeof(struct dp_packet) + mbuf_size;
        /* mbuf size, rounded up to cacheline size. */
        aligned_mbuf_size = ROUND_UP(pkt_size, RTE_CACHE_LINE_SIZE);
        /* If there is a size discrepancy, add padding to mbuf_priv_data_len.
         * This maintains mbuf size cache alignment, while also honoring RX
         * buffer alignment in the data portion of the mbuf. If this adjustment
         * is not made, there is a possibility later on that for an element of
         * the mempool, buf, buf->data_len < (buf->buf_len - buf->data_off).
         * This is problematic in the case of multi-segment mbufs, particularly
         * when an mbuf segment needs to be resized (when [push|pop]ping a VLAN
         * header, for example.
         */
        mbuf_priv_data_len += (aligned_mbuf_size - pkt_size);

        dmp->mp = rte_pktmbuf_pool_create(mp_name, n_mbufs, MP_CACHE_SZ,
                                          mbuf_priv_data_len,
                                          mbuf_size + RTE_PKTMBUF_HEADROOM,
                                          socket_id);

        if (dmp->mp) {
            VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
                     mp_name, n_mbufs);
            /* rte_pktmbuf_pool_create has done some initialization of the
             * rte_mbuf part of each dp_packet, while ovs_rte_pktmbuf_init
             * initializes some OVS specific fields of dp_packet.
             */
            rte_mempool_obj_iter(dmp->mp, ovs_rte_pktmbuf_init, NULL);
            return dmp;
        } else if (rte_errno == EEXIST) {
            /* A mempool with the same name already exists. We just
             * retrieve its pointer to be returned to the caller. */
            dmp->mp = rte_mempool_lookup(mp_name);
            /* As the mempool create returned EEXIST we can expect the
             * lookup has returned a valid pointer. If for some reason
             * that's not the case we keep track of it. */
            VLOG_DBG("A mempool with name \"%s\" already exists at %p.",
                     mp_name, dmp->mp);
            return dmp;
        } else {
            VLOG_DBG("Failed to create mempool \"%s\" with a request of "
                     "%u mbufs, retrying with %u mbufs",
                     mp_name, n_mbufs, n_mbufs / 2);
        }
    } while (!dmp->mp && rte_errno == ENOMEM && (n_mbufs /= 2) >= MIN_NB_MBUF);

    VLOG_ERR("Failed to create mempool \"%s\" with a request of %u mbufs",
             mp_name, n_mbufs);

    rte_free(dmp);

    return NULL;
}
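/* Note (illustrative): with the constants above, a shared-memory allocation
 * retries at 262144, 131072, 65536, 32768 and finally 16384 mbufs
 * (MIN_NB_MBUF) before dpdk_mp_create() gives up. */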
static struct dpdk_mp *
dpdk_mp_get(struct netdev_dpdk *dev, int mtu, bool per_port_mp)
{
    struct dpdk_mp *dmp, *next;
    bool reuse = false;

    ovs_mutex_lock(&dpdk_mp_mutex);
    /* Check if shared memory is being used, if so check existing mempools
     * to see if reuse is possible. */
    if (!per_port_mp) {
        LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
            if (dmp->socket_id == dev->requested_socket_id
                && dmp->mtu == mtu) {
                VLOG_DBG("Reusing mempool \"%s\"", dmp->mp->name);
                dmp->refcount++;
                reuse = true;
                break;
            }
        }
    }
    /* Sweep mempools after reuse or before create. */
    dpdk_mp_sweep();

    if (!reuse) {
        dmp = dpdk_mp_create(dev, mtu, per_port_mp);
        if (dmp) {
            /* Shared memory will hit the reuse case above so will not
             * request a mempool that already exists but we need to check
             * for the EEXIST case for per port memory case. Compare the
             * mempool returned by dmp to each entry in dpdk_mp_list. If a
             * match is found, free dmp as a new entry is not required, set
             * dmp to point to the existing entry and increment the refcount
             * to avoid being freed at a later stage.
             */
            if (per_port_mp && rte_errno == EEXIST) {
                LIST_FOR_EACH (next, list_node, &dpdk_mp_list) {
                    if (dmp->mp == next->mp) {
                        rte_free(dmp);
                        dmp = next;
                        dmp->refcount++;
                    }
                }
            } else {
                ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
            }
        }
    }

    ovs_mutex_unlock(&dpdk_mp_mutex);

    return dmp;
}
/* Decrement reference to a mempool. */
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    ovs_mutex_lock(&dpdk_mp_mutex);
    ovs_assert(dmp->refcount);
    dmp->refcount--;
    ovs_mutex_unlock(&dpdk_mp_mutex);
}
/* Depending on the memory model being used this function tries to
 * identify and reuse an existing mempool or tries to allocate a new
 * mempool on requested_socket_id with mbuf size corresponding to the
 * requested_mtu. On success, a new configuration will be applied.
 * On error, device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
    struct dpdk_mp *dmp;
    int ret = 0;
    bool per_port_mp = dpdk_per_port_memory();

    /* With shared memory we do not need to configure a mempool if the MTU
     * and socket ID have not changed, the previous configuration is still
     * valid so return 0. */
    if (!per_port_mp && dev->mtu == dev->requested_mtu
        && dev->socket_id == dev->requested_socket_id) {
        return ret;
    }

    dmp = dpdk_mp_get(dev, FRAME_LEN_TO_MTU(buf_size), per_port_mp);
    if (!dmp) {
        VLOG_ERR("Failed to create memory pool for netdev "
                 "%s, with MTU %d on socket %d: %s\n",
                 dev->up.name, dev->requested_mtu, dev->requested_socket_id,
                 rte_strerror(rte_errno));
        ret = rte_errno;
    } else {
        /* Check for any pre-existing dpdk_mp for the device before accessing
         * the associated mempool.
         */
        if (dev->dpdk_mp != NULL) {
            /* A new MTU was requested, decrement the reference count for the
             * devices current dpdk_mp. This is required even if a pointer to
             * same dpdk_mp is returned by dpdk_mp_get. The refcount for dmp
             * has already been incremented by dpdk_mp_get at this stage so it
             * must be decremented to keep an accurate refcount for the
             * dpdk_mp.
             */
            dpdk_mp_put(dev->dpdk_mp);
        }
        dev->dpdk_mp = dmp;
        dev->mtu = dev->requested_mtu;
        dev->socket_id = dev->requested_socket_id;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    }

    return ret;
}
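/* Caller pattern (illustrative): reconfiguration code invokes this and may
 * treat EEXIST from a per-port mempool as non-fatal, since an existing pool
 * with the right geometry is perfectly usable. */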
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl,
                        "Port "DPDK_PORT_ID_FMT" Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX)
                        ? "full-duplex" : "half-duplex");
        } else {
            VLOG_DBG_RL(&rl, "Port "DPDK_PORT_ID_FMT" Link Down",
                        dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_port_config(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;
    struct rte_eth_conf conf = port_conf;
    struct rte_eth_dev_info info;
    uint16_t conf_mtu;

    rte_eth_dev_info_get(dev->port_id, &info);

    /* As of DPDK 17.11.1 a few PMDs require to explicitly enable
     * scatter to support jumbo RX.
     * Setting scatter for the device is done after checking for
     * scatter support in the device capabilities. */
    if (dev->mtu > RTE_ETHER_MTU) {
        if (dev->hw_ol_features & NETDEV_RX_HW_SCATTER) {
            conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
        }
    }

    conf.intr_conf.lsc = dev->lsc_interrupt_mode;

    if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) {
        conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
    }

    if (!(dev->hw_ol_features & NETDEV_RX_HW_CRC_STRIP)
        && info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
        conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
    }

    if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
        conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
        conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
        conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
    }

    /* Limit configured rss hash functions to only those supported
     * by the eth device. */
    conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request fewer queues. */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
        if (diag) {
            VLOG_WARN("Interface %s eth_dev setup error %s\n",
                      dev->up.name, rte_strerror(-diag));
            break;
        }

        diag = rte_eth_dev_set_mtu(dev->port_id, dev->mtu);
        if (diag) {
            /* A device may not support rte_eth_dev_set_mtu, in this case
             * flag a warning to the user and include the devices configured
             * MTU value that will be used instead. */
            if (-ENOTSUP == diag) {
                rte_eth_dev_get_mtu(dev->port_id, &conf_mtu);
                VLOG_WARN("Interface %s does not support MTU configuration, "
                          "max packet size supported is %"PRIu16".",
                          dev->up.name, conf_mtu);
            } else {
                VLOG_ERR("Interface %s MTU (%d) setup error: %s",
                         dev->up.name, dev->mtu, rte_strerror(-diag));
                break;
            }
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, dev->txq_size,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s unable to setup txq(%d): %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with less tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, dev->rxq_size,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s unable to setup rxq(%d): %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with less rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag;
}
static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
    if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
        VLOG_WARN("Failed to enable flow control on device "DPDK_PORT_ID_FMT,
                  dev->port_id);
    }
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct rte_ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;
    uint32_t rx_chksm_offload_capa = DEV_RX_OFFLOAD_UDP_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_CKSUM |
                                     DEV_RX_OFFLOAD_IPV4_CKSUM;
    uint32_t tx_tso_offload_capa = DEV_TX_OFFLOAD_TCP_TSO |
                                   DEV_TX_OFFLOAD_TCP_CKSUM |
                                   DEV_TX_OFFLOAD_IPV4_CKSUM;

    rte_eth_dev_info_get(dev->port_id, &info);

    if (strstr(info.driver_name, "vf") != NULL) {
        VLOG_INFO("Virtual function detected, HW_CRC_STRIP will be enabled");
        dev->hw_ol_features |= NETDEV_RX_HW_CRC_STRIP;
    } else {
        dev->hw_ol_features &= ~NETDEV_RX_HW_CRC_STRIP;
    }

    if ((info.rx_offload_capa & rx_chksm_offload_capa) !=
            rx_chksm_offload_capa) {
        VLOG_WARN("Rx checksum offload is not supported on port "
                  DPDK_PORT_ID_FMT, dev->port_id);
        dev->hw_ol_features &= ~NETDEV_RX_CHECKSUM_OFFLOAD;
    } else {
        dev->hw_ol_features |= NETDEV_RX_CHECKSUM_OFFLOAD;
    }

    if (info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER) {
        dev->hw_ol_features |= NETDEV_RX_HW_SCATTER;
    } else {
        /* Do not warn on lack of scatter support */
        dev->hw_ol_features &= ~NETDEV_RX_HW_SCATTER;
    }

    if (info.tx_offload_capa & tx_tso_offload_capa) {
        dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD;
    } else {
        dev->hw_ol_features &= ~NETDEV_TX_TSO_OFFLOAD;
        VLOG_WARN("Tx TSO offload is not supported on %s port "
                  DPDK_PORT_ID_FMT, netdev_get_name(&dev->up), dev->port_id);
    }

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_port_config(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d lsc interrupt mode:%s) "
                 "configure error: %s",
                 dev->up.name, n_rxq, n_txq,
                 dev->lsc_interrupt_mode ? "true" : "false",
                 rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }
    dev->started = true;

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port "DPDK_PORT_ID_FMT": "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}
static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    dev = dpdk_rte_mzalloc(sizeof *dev);
    if (dev) {
        return &dev->up;
    }

    return NULL;
}
static struct dpdk_tx_queue *
netdev_dpdk_alloc_txq(unsigned int n_txqs)
{
    struct dpdk_tx_queue *txqs;
    unsigned i;

    txqs = dpdk_rte_mzalloc(n_txqs * sizeof *txqs);
    if (txqs) {
        for (i = 0; i < n_txqs; i++) {
            /* Initialize map for vhost devices. */
            txqs[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
            rte_spinlock_init(&txqs[i].tx_lock);
        }
    }

    return txqs;
}
static int
common_construct(struct netdev *netdev, dpdk_port_t port_no,
                 enum dpdk_dev_type type, int socket_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_init(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    dev->socket_id = socket_id < 0 ? SOCKET0 : socket_id;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;
    dev->requested_mtu = RTE_ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    dev->requested_lsc_interrupt_mode = 0;
    ovsrcu_index_init(&dev->vid, -1);
    dev->vhost_reconfigured = false;
    dev->attached = false;
    dev->started = false;
    dev->reset_needed = false;

    ovsrcu_init(&dev->qos_conf, NULL);

    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_rxq = 0;
    netdev->n_txq = 0;
    dev->requested_n_rxq = NR_QUEUE;
    dev->requested_n_txq = NR_QUEUE;
    dev->requested_rxq_size = NIC_PORT_DEFAULT_RXQ_SIZE;
    dev->requested_txq_size = NIC_PORT_DEFAULT_TXQ_SIZE;

    /* Initialize the flow control to NULL */
    memset(&dev->fc_conf, 0, sizeof dev->fc_conf);

    /* Initialize the hardware offload flags to 0 */
    dev->hw_ol_features = 0;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    ovs_list_push_back(&dpdk_list, &dev->list_node);

    netdev_request_reconfigure(netdev);

    dev->rte_xstats_names = NULL;
    dev->rte_xstats_names_size = 0;

    dev->rte_xstats_ids = NULL;
    dev->rte_xstats_ids_size = 0;

    dev->sw_stats = xzalloc(sizeof *dev->sw_stats);
    dev->sw_stats->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;

    return 0;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
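/* Usage example (illustrative): dpdk_dev_parse_name("dpdkr7", "dpdkr",
 * &port_no) returns 0 and sets port_no to 7, while "dpdkr-1" and "dpdkr+1"
 * fail because leading signs are rejected. */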
/* Get the number of OVS interfaces which have the same DPDK
 * rte device (e.g. same pci bus address).
 * FIXME: avoid direct access to DPDK internal array rte_eth_devices.
 */
static int
netdev_dpdk_get_num_ports(struct rte_device *device)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev;
    int count = 0;

    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (rte_eth_devices[dev->port_id].device == device
            && rte_eth_devices[dev->port_id].state != RTE_ETH_DEV_UNUSED) {
            count++;
        }
    }

    return count;
}
static int
vhost_common_construct(struct netdev *netdev)
    OVS_REQUIRES(dpdk_mutex)
{
    int socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    dev->vhost_rxq_enabled = dpdk_rte_mzalloc(OVS_VHOST_MAX_QUEUE_NUM *
                                              sizeof *dev->vhost_rxq_enabled);
    if (!dev->vhost_rxq_enabled) {
        return ENOMEM;
    }
    dev->tx_q = netdev_dpdk_alloc_txq(OVS_VHOST_MAX_QUEUE_NUM);
    if (!dev->tx_q) {
        rte_free(dev->vhost_rxq_enabled);
        return ENOMEM;
    }

    atomic_init(&dev->vhost_tx_retries_max, VHOST_ENQ_RETRY_DEF);

    return common_construct(netdev, DPDK_ETH_PORT_ID_INVALID,
                            DPDK_DEV_VHOST, socket_id);
}
static int
netdev_dpdk_vhost_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location where
     * the socket is to be created, then register the socket.
     */
    dev->vhost_id = xasprintf("%s/%s", dpdk_get_vhost_sock_dir(), name);

    dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;

    /* There is no support for multi-segments buffers. */
    dev->vhost_driver_flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
    err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 dev->vhost_id);
        goto out;
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
    }

    err = rte_vhost_driver_callback_register(dev->vhost_id,
                                             &virtio_net_device_ops);
    if (err) {
        VLOG_ERR("rte_vhost_driver_callback_register failed for vhost user "
                 "port: %s\n", name);
        goto out;
    }

    if (!userspace_tso_enabled()) {
        err = rte_vhost_driver_disable_features(dev->vhost_id,
                                    1ULL << VIRTIO_NET_F_HOST_TSO4
                                    | 1ULL << VIRTIO_NET_F_HOST_TSO6
                                    | 1ULL << VIRTIO_NET_F_CSUM);
        if (err) {
            VLOG_ERR("rte_vhost_driver_disable_features failed for vhost user "
                     "port: %s\n", name);
            goto out;
        }
    }

    err = rte_vhost_driver_start(dev->vhost_id);
    if (err) {
        VLOG_ERR("rte_vhost_driver_start failed for vhost user "
                 "port: %s\n", name);
        goto out;
    }

    err = vhost_common_construct(netdev);
    if (err) {
        VLOG_ERR("vhost_common_construct failed for vhost user "
                 "port: %s\n", name);
    }

out:
    if (err) {
        free(dev->vhost_id);
        dev->vhost_id = NULL;
    }

    ovs_mutex_unlock(&dpdk_mutex);
    VLOG_WARN_ONCE("dpdkvhostuser ports are considered deprecated; "
                   "please migrate to dpdkvhostuserclient ports.");

    return err;
}
static int
netdev_dpdk_vhost_client_construct(struct netdev *netdev)
{
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    err = vhost_common_construct(netdev);
    if (err) {
        VLOG_ERR("vhost_common_construct failed for vhost user client "
                 "port: %s\n", netdev->name);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    err = common_construct(netdev, DPDK_ETH_PORT_ID_INVALID,
                           DPDK_DEV_ETH, SOCKET0);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static void
common_destruct(struct netdev_dpdk *dev)
    OVS_REQUIRES(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    rte_free(dev->tx_q);
    dpdk_mp_put(dev->dpdk_mp);

    ovs_list_remove(&dev->list_node);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));
    free(dev->sw_stats);
    ovs_mutex_destroy(&dev->mutex);
}
static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_device *rte_dev;
    struct rte_eth_dev *eth_dev;
    bool remove_on_close;

    ovs_mutex_lock(&dpdk_mutex);

    rte_eth_dev_stop(dev->port_id);
    dev->started = false;

    if (dev->attached) {
        /* Retrieve eth device data before closing it.
         * FIXME: avoid direct access to DPDK internal array rte_eth_devices.
         */
        eth_dev = &rte_eth_devices[dev->port_id];
        remove_on_close =
            (eth_dev->data->dev_flags & RTE_ETH_DEV_CLOSE_REMOVE);
        rte_dev = eth_dev->device;

        /* Remove the eth device. */
        rte_eth_dev_close(dev->port_id);

        /* Remove this rte device and all its eth devices if flag
         * RTE_ETH_DEV_CLOSE_REMOVE is not supported (which means representors
         * are not supported), or if all the eth devices belonging to the rte
         * device are closed.
         */
        if (!remove_on_close || !netdev_dpdk_get_num_ports(rte_dev)) {
            int ret = rte_dev_remove(rte_dev);

            if (ret < 0) {
                VLOG_ERR("Device '%s' can not be detached: %s.",
                         dev->devargs, rte_strerror(-ret));
            } else {
                /* Device was closed and detached. */
                VLOG_INFO("Device '%s' has been removed and detached",
                          dev->devargs);
            }
        } else {
            /* Device was only closed. rte_dev_remove() was not called. */
            VLOG_INFO("Device '%s' has been removed", dev->devargs);
        }
    }

    netdev_dpdk_clear_xstats(dev);
    free(dev->devargs);
    common_destruct(dev);

    ovs_mutex_unlock(&dpdk_mutex);
}
/* rte_vhost_driver_unregister() can call back destroy_device(), which will
 * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'. To avoid a
 * deadlock, none of the mutexes must be held while calling this function. */
static int
dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
                             char *vhost_id)
    OVS_EXCLUDED(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    return rte_vhost_driver_unregister(vhost_id);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    char *vhost_id;

    ovs_mutex_lock(&dpdk_mutex);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_vid(dev) >= 0
        && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.", dev->vhost_id);
    }

    vhost_id = dev->vhost_id;
    dev->vhost_id = NULL;
    rte_free(dev->vhost_rxq_enabled);

    common_destruct(dev);

    ovs_mutex_unlock(&dpdk_mutex);

    if (!vhost_id) {
        goto out;
    }

    if (dpdk_vhost_driver_unregister(dev, vhost_id)) {
        VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n",
                 netdev->name, vhost_id);
    } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        /* OVS server mode - remove this socket from list for deletion */
        fatal_signal_remove_file_to_unlink(vhost_id);
    }
out:
    free(vhost_id);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}
static void
netdev_dpdk_clear_xstats(struct netdev_dpdk *dev)
{
    /* If statistics are already allocated, we have to
     * reconfigure, as port_id could have been changed. */
    if (dev->rte_xstats_names) {
        free(dev->rte_xstats_names);
        dev->rte_xstats_names = NULL;
        dev->rte_xstats_names_size = 0;
    }
    if (dev->rte_xstats_ids) {
        free(dev->rte_xstats_ids);
        dev->rte_xstats_ids = NULL;
        dev->rte_xstats_ids_size = 0;
    }
}
static const char *
netdev_dpdk_get_xstat_name(struct netdev_dpdk *dev, uint64_t id)
{
    if (id >= dev->rte_xstats_names_size) {
        return "UNKNOWN";
    }
    return dev->rte_xstats_names[id].name;
}
static void
netdev_dpdk_configure_xstats(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_eth_xstat *rte_xstats;
    int rte_xstats_len;
    bool ret;
    uint64_t id;
    int xstats_no;
    const char *name;

    /* Retrieving all XSTATS names. If something goes wrong
     * or the number of counters is 0, the rte_xstats_names
     * buffer will be marked as NULL, and any further xstats
     * query won't be performed (e.g. during netdev_dpdk_get_stats
     * execution). */

    ret = false;

    if (dev->rte_xstats_names == NULL || dev->rte_xstats_ids == NULL) {
        dev->rte_xstats_names_size =
                rte_eth_xstats_get_names(dev->port_id, NULL, 0);

        if (dev->rte_xstats_names_size < 0) {
            VLOG_WARN("Cannot get XSTATS for port: "DPDK_PORT_ID_FMT,
                      dev->port_id);
            dev->rte_xstats_names_size = 0;
        } else {
            /* Reserve memory for xstats names and values */
            dev->rte_xstats_names = xcalloc(dev->rte_xstats_names_size,
                                            sizeof *dev->rte_xstats_names);

            if (dev->rte_xstats_names) {
                /* Retrieve xstats names */
                rte_xstats_len =
                        rte_eth_xstats_get_names(dev->port_id,
                                                 dev->rte_xstats_names,
                                                 dev->rte_xstats_names_size);
                if (rte_xstats_len < 0) {
                    VLOG_WARN("Cannot get XSTATS names for port: "
                              DPDK_PORT_ID_FMT, dev->port_id);
                    goto out;
                } else if (rte_xstats_len != dev->rte_xstats_names_size) {
                    VLOG_WARN("XSTATS size doesn't match for port: "
                              DPDK_PORT_ID_FMT, dev->port_id);
                    goto out;
                }

                dev->rte_xstats_ids = xcalloc(dev->rte_xstats_names_size,
                                              sizeof(uint64_t));

                /* We have to calculate number of counters */
                rte_xstats = xmalloc(rte_xstats_len * sizeof *rte_xstats);
                memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);

                /* Retrieve xstats values */
                if (rte_eth_xstats_get(dev->port_id, rte_xstats,
                                       rte_xstats_len) > 0) {
                    dev->rte_xstats_ids_size = 0;
                    xstats_no = 0;
                    for (uint32_t i = 0; i < rte_xstats_len; i++) {
                        id = rte_xstats[i].id;
                        name = netdev_dpdk_get_xstat_name(dev, id);
                        /* We need to filter out everything except
                         * dropped, error and management counters */
                        if (string_ends_with(name, "_errors") ||
                            strstr(name, "_management_") ||
                            string_ends_with(name, "_dropped")) {

                            dev->rte_xstats_ids[xstats_no] = id;
                            xstats_no++;
                        }
                    }
                    dev->rte_xstats_ids_size = xstats_no;
                    ret = true;
                } else {
                    VLOG_WARN("Can't get XSTATS IDs for port: "
                              DPDK_PORT_ID_FMT, dev->port_id);
                }

                free(rte_xstats);
            }
        }
    } else {
        /* Already configured */
        ret = true;
    }

out:
    if (!ret) {
        netdev_dpdk_clear_xstats(dev);
    }
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "mtu", "%d", dev->mtu);

    if (dev->type == DPDK_DEV_ETH) {
        smap_add_format(args, "requested_rxq_descriptors", "%d",
                        dev->requested_rxq_size);
        smap_add_format(args, "configured_rxq_descriptors", "%d",
                        dev->rxq_size);
        smap_add_format(args, "requested_txq_descriptors", "%d",
                        dev->requested_txq_size);
        smap_add_format(args, "configured_txq_descriptors", "%d",
                        dev->txq_size);
        if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) {
            smap_add(args, "rx_csum_offload", "true");
        } else {
            smap_add(args, "rx_csum_offload", "false");
        }
        if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
            smap_add(args, "tx_tso_offload", "true");
        } else {
            smap_add(args, "tx_tso_offload", "false");
        }
        smap_add(args, "lsc_interrupt_mode",
                 dev->lsc_interrupt_mode ? "true" : "false");
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_lookup_by_port_id(dpdk_port_t port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev;

    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (dev->port_id == port_id) {
            return dev;
        }
    }

    return NULL;
}
static dpdk_port_t
netdev_dpdk_get_port_by_mac(const char *mac_str)
{
    dpdk_port_t port_id;
    struct eth_addr mac, port_mac;

    if (!eth_addr_from_string(mac_str, &mac)) {
        VLOG_ERR("invalid mac: %s", mac_str);
        return DPDK_ETH_PORT_ID_INVALID;
    }

    RTE_ETH_FOREACH_DEV (port_id) {
        struct rte_ether_addr ea;

        rte_eth_macaddr_get(port_id, &ea);
        memcpy(port_mac.ea, ea.addr_bytes, ETH_ADDR_LEN);
        if (eth_addr_equals(mac, port_mac)) {
            return port_id;
        }
    }

    return DPDK_ETH_PORT_ID_INVALID;
}
/* Return the first DPDK port id matching the devargs pattern. */
static dpdk_port_t
netdev_dpdk_get_port_by_devargs(const char *devargs)
    OVS_REQUIRES(dpdk_mutex)
{
    dpdk_port_t port_id;
    struct rte_dev_iterator iterator;

    RTE_ETH_FOREACH_MATCHING_DEV (port_id, devargs, &iterator) {
        /* If a break is done - must call rte_eth_iterator_cleanup. */
        rte_eth_iterator_cleanup(&iterator);
        break;
    }

    return port_id;
}
/*
 * Normally, a PCI id (optionally followed by a representor number)
 * is enough for identifying a specific DPDK port.
 * However, for some NICs having multiple ports sharing the same PCI
 * id, using PCI id won't work then.
 *
 * To fix that, here one more method is introduced: "class=eth,mac=$MAC".
 *
 * Note that the compatibility is fully kept: user can still use the
 * PCI id for adding ports (when it's enough for them).
 */
static dpdk_port_t
netdev_dpdk_process_devargs(struct netdev_dpdk *dev,
                            const char *devargs, char **errp)
    OVS_REQUIRES(dpdk_mutex)
{
    dpdk_port_t new_port_id;

    if (strncmp(devargs, "class=eth,mac=", 14) == 0) {
        new_port_id = netdev_dpdk_get_port_by_mac(&devargs[14]);
    } else {
        new_port_id = netdev_dpdk_get_port_by_devargs(devargs);
        if (!rte_eth_dev_is_valid_port(new_port_id)) {
            /* Device not found in DPDK, attempt to attach it */
            if (rte_dev_probe(devargs)) {
                new_port_id = DPDK_ETH_PORT_ID_INVALID;
            } else {
                new_port_id = netdev_dpdk_get_port_by_devargs(devargs);
                if (rte_eth_dev_is_valid_port(new_port_id)) {
                    /* Attach successful */
                    dev->attached = true;
                    VLOG_INFO("Device '%s' attached to DPDK", devargs);
                } else {
                    /* Attach unsuccessful */
                    new_port_id = DPDK_ETH_PORT_ID_INVALID;
                }
            }
        }
    }

    if (new_port_id == DPDK_ETH_PORT_ID_INVALID) {
        VLOG_WARN_BUF(errp, "Error attaching device '%s' to DPDK", devargs);
    }

    return new_port_id;
}
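/* Usage example (illustrative): either devargs form below selects a port,
 * the MAC-based one helping when several ports share one PCI address:
 *
 *     ovs-vsctl set Interface p0 options:dpdk-devargs=0000:01:00.0
 *     ovs-vsctl set Interface p0 \
 *         options:dpdk-devargs="class=eth,mac=00:11:22:33:44:55"
 */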
static int
dpdk_eth_event_callback(dpdk_port_t port_id, enum rte_eth_event_type type,
                        void *param OVS_UNUSED, void *ret_param OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    switch ((int) type) {
    case RTE_ETH_EVENT_INTR_RESET:
        ovs_mutex_lock(&dpdk_mutex);
        dev = netdev_dpdk_lookup_by_port_id(port_id);
        if (dev) {
            ovs_mutex_lock(&dev->mutex);
            dev->reset_needed = true;
            netdev_request_reconfigure(&dev->up);
            VLOG_DBG_RL(&rl, "%s: Device reset requested.",
                        netdev_get_name(&dev->up));
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        break;

    default:
        /* Ignore all other types. */
        break;
    }

    return 0;
}
static void
dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args)
    OVS_REQUIRES(dev->mutex)
{
    int new_n_rxq;

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(&dev->up);
    }
}
static void
dpdk_process_queue_size(struct netdev *netdev, const struct smap *args,
                        const char *flag, int default_size, int *new_size)
{
    int queue_size = smap_get_int(args, flag, default_size);

    if (queue_size <= 0 || queue_size > NIC_PORT_MAX_Q_SIZE
            || !is_pow2(queue_size)) {
        queue_size = default_size;
    }

    if (queue_size != *new_size) {
        *new_size = queue_size;
        netdev_request_reconfigure(netdev);
    }
}
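/* Example of the validation above (illustrative): n_rxq_desc=1000 falls back
 * to the 2048 default because 1000 is not a power of two, while
 * n_rxq_desc=1024 is accepted and triggers a reconfiguration. */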
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args,
                       char **errp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    bool rx_fc_en, tx_fc_en, autoneg, lsc_interrupt_mode;
    bool flow_control_requested = true;
    enum rte_eth_fc_mode fc_mode;
    static const enum rte_eth_fc_mode fc_mode_set[2][2] = {
        {RTE_FC_NONE,     RTE_FC_TX_PAUSE},
        {RTE_FC_RX_PAUSE, RTE_FC_FULL    }
    };
    const char *new_devargs;
    int err = 0;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    dpdk_set_rxq_config(dev, args);

    dpdk_process_queue_size(netdev, args, "n_rxq_desc",
                            NIC_PORT_DEFAULT_RXQ_SIZE,
                            &dev->requested_rxq_size);
    dpdk_process_queue_size(netdev, args, "n_txq_desc",
                            NIC_PORT_DEFAULT_TXQ_SIZE,
                            &dev->requested_txq_size);

    new_devargs = smap_get(args, "dpdk-devargs");

    if (dev->devargs && new_devargs && strcmp(new_devargs, dev->devargs)) {
        /* The user requested a new device. If we return error, the caller
         * will delete this netdev and try to recreate it. */
        err = EAGAIN;
        goto out;
    }

    /* dpdk-devargs is required for device configuration */
    if (new_devargs && new_devargs[0]) {
        /* Don't process dpdk-devargs if value is unchanged and port id
         * is valid. */
        if (!(dev->devargs && !strcmp(dev->devargs, new_devargs)
              && rte_eth_dev_is_valid_port(dev->port_id))) {
            dpdk_port_t new_port_id = netdev_dpdk_process_devargs(dev,
                                                                  new_devargs,
                                                                  errp);
            if (!rte_eth_dev_is_valid_port(new_port_id)) {
                err = EINVAL;
            } else if (new_port_id == dev->port_id) {
                /* Already configured, do not reconfigure again */
                err = 0;
            } else {
                struct netdev_dpdk *dup_dev;

                dup_dev = netdev_dpdk_lookup_by_port_id(new_port_id);
                if (dup_dev) {
                    VLOG_WARN_BUF(errp, "'%s' is trying to use device '%s' "
                                        "which is already in use by '%s'",
                                  netdev_get_name(netdev), new_devargs,
                                  netdev_get_name(&dup_dev->up));
                    err = EADDRINUSE;
                } else {
                    int sid = rte_eth_dev_socket_id(new_port_id);

                    dev->requested_socket_id = sid < 0 ? SOCKET0 : sid;
                    dev->devargs = xstrdup(new_devargs);
                    dev->port_id = new_port_id;
                    netdev_request_reconfigure(&dev->up);
                    netdev_dpdk_clear_xstats(dev);
                    err = 0;
                }
            }
        }
    } else {
        VLOG_WARN_BUF(errp, "'%s' is missing 'options:dpdk-devargs'. "
                            "The old 'dpdk<port_id>' names are not supported",
                      netdev_get_name(netdev));
        err = EINVAL;
    }

    if (err) {
        goto out;
    }

    lsc_interrupt_mode = smap_get_bool(args, "dpdk-lsc-interrupt", false);
    if (dev->requested_lsc_interrupt_mode != lsc_interrupt_mode) {
        dev->requested_lsc_interrupt_mode = lsc_interrupt_mode;
        netdev_request_reconfigure(netdev);
    }

    rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
    tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
    autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);

    fc_mode = fc_mode_set[tx_fc_en][rx_fc_en];

    if (!smap_get(args, "rx-flow-ctrl") && !smap_get(args, "tx-flow-ctrl")
        && !smap_get(args, "flow-ctrl-autoneg")) {
        /* FIXME: User didn't ask for flow control configuration.
         * For now we'll not print a warning if flow control is not
         * supported by the DPDK port. */
        flow_control_requested = false;
    }

    /* Get the Flow control configuration. */
    err = -rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
    if (err) {
        if (err == ENOTSUP) {
            if (flow_control_requested) {
                VLOG_WARN("%s: Flow control is not supported.",
                          netdev_get_name(netdev));
            }
            err = 0; /* Not fatal. */
        } else {
            VLOG_WARN("%s: Cannot get flow control parameters: %s",
                      netdev_get_name(netdev), rte_strerror(err));
        }
        goto out;
    }

    if (dev->fc_conf.mode != fc_mode || autoneg != dev->fc_conf.autoneg) {
        dev->fc_conf.mode = fc_mode;
        dev->fc_conf.autoneg = autoneg;
        dpdk_eth_flow_ctrl_setup(dev);
    }

out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args,
                            char **errp OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_set_rxq_config(dev, args);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_client_set_config(struct netdev *netdev,
                                    const struct smap *args,
                                    char **errp OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *path;
    int max_tx_retries, cur_max_tx_retries;

    ovs_mutex_lock(&dev->mutex);
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        path = smap_get(args, "vhost-server-path");
        if (!nullable_string_is_equal(path, dev->vhost_id)) {
            free(dev->vhost_id);
            dev->vhost_id = nullable_xstrdup(path);
            /* check zero copy configuration */
            if (smap_get_bool(args, "dq-zero-copy", false)) {
                dev->vhost_driver_flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
            } else {
                dev->vhost_driver_flags &= ~RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
            }
            netdev_request_reconfigure(netdev);
        }
    }

    max_tx_retries = smap_get_int(args, "tx-retries-max",
                                  VHOST_ENQ_RETRY_DEF);
    if (max_tx_retries < VHOST_ENQ_RETRY_MIN
        || max_tx_retries > VHOST_ENQ_RETRY_MAX) {
        max_tx_retries = VHOST_ENQ_RETRY_DEF;
    }
    atomic_read_relaxed(&dev->vhost_tx_retries_max, &cur_max_tx_retries);
    if (max_tx_retries != cur_max_tx_retries) {
        atomic_store_relaxed(&dev->vhost_tx_retries_max, max_tx_retries);
        VLOG_INFO("Max Tx retries for vhost device '%s' set to %d",
                  netdev_get_name(netdev), max_tx_retries);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}
/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    if (rx) {
        return &rx->up;
    }

    return NULL;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
/* Prepare the packet for HWOL.
 * Return True if the packet is OK to continue. */
static bool
netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
{
    struct dp_packet *pkt = CONTAINER_OF(mbuf, struct dp_packet, mbuf);

    if (mbuf->ol_flags & PKT_TX_L4_MASK) {
        mbuf->l2_len = (char *)dp_packet_l3(pkt) - (char *)dp_packet_eth(pkt);
        mbuf->l3_len = (char *)dp_packet_l4(pkt) - (char *)dp_packet_l3(pkt);
        mbuf->outer_l2_len = 0;
        mbuf->outer_l3_len = 0;
    }

    if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
        struct tcp_header *th = dp_packet_l4(pkt);

        if (!th) {
            VLOG_WARN_RL(&rl, "%s: TCP Segmentation without L4 header"
                         " pkt len: %"PRIu32"", dev->up.name, mbuf->pkt_len);
            return false;
        }

        mbuf->l4_len = TCP_OFFSET(th->tcp_ctl) * 4;
        mbuf->ol_flags |= PKT_TX_TCP_CKSUM;
        mbuf->tso_segsz = dev->mtu - mbuf->l3_len - mbuf->l4_len;

        if (mbuf->ol_flags & PKT_TX_IPV4) {
            mbuf->ol_flags |= PKT_TX_IP_CKSUM;
        }
    }
    return true;
}
/* Prepare a batch for HWOL.
 * Return the number of good packets in the batch. */
static int
netdev_dpdk_prep_hwol_batch(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                            int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt;

    /* Prepare and filter bad HWOL packets. */
    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        if (!netdev_dpdk_prep_hwol_packet(dev, pkt)) {
            rte_pktmbuf_free(pkt);
            continue;
        }

        if (OVS_UNLIKELY(i != cnt)) {
            pkts[cnt] = pkt;
        }
        cnt++;
    }

    return cnt;
}
/* Tries to transmit 'pkts' to txq 'qid' of device 'dev'.  Takes ownership of
 * 'pkts', even in case of failure.
 *
 * Returns the number of packets that weren't transmitted. */
static inline int
netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
                         struct rte_mbuf **pkts, int cnt)
{
    uint32_t nb_tx = 0;
    uint16_t nb_tx_prep = cnt;

    if (userspace_tso_enabled()) {
        nb_tx_prep = rte_eth_tx_prepare(dev->port_id, qid, pkts, cnt);
        if (nb_tx_prep != cnt) {
            VLOG_WARN_RL(&rl, "%s: Output batch contains invalid packets. "
                         "Only %u/%u are valid: %s", dev->up.name, nb_tx_prep,
                         cnt, rte_strerror(rte_errno));
        }
    }

    while (nb_tx != nb_tx_prep) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx,
                               nb_tx_prep - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != cnt)) {
        /* Free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < cnt; i++) {
            rte_pktmbuf_free(pkts[i]);
        }
    }

    return cnt - nb_tx;
}
static bool
netdev_dpdk_srtcm_policer_pkt_handle(struct rte_meter_srtcm *meter,
                                     struct rte_meter_srtcm_profile *profile,
                                     struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct rte_ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, profile, time, pkt_len) ==
                                             RTE_COLOR_GREEN;
}
static int
srtcm_policer_run_single_packet(struct rte_meter_srtcm *meter,
                                struct rte_meter_srtcm_profile *profile,
                                struct rte_mbuf **pkts, int pkt_cnt,
                                bool should_steal)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet */
        if (netdev_dpdk_srtcm_policer_pkt_handle(meter, profile,
                                                 pkt, current_time)) {
            if (OVS_UNLIKELY(i != cnt)) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            if (should_steal) {
                rte_pktmbuf_free(pkt);
            }
        }
    }

    return cnt;
}
static int
ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
                    int pkt_cnt, bool should_steal)
{
    int cnt = 0;

    rte_spinlock_lock(&policer->policer_lock);
    cnt = srtcm_policer_run_single_packet(&policer->in_policer,
                                          &policer->in_prof,
                                          pkts, pkt_cnt, should_steal);
    rte_spinlock_unlock(&policer->policer_lock);

    return cnt;
}
static bool
is_vhost_running(struct netdev_dpdk *dev)
{
    return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
}
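
/* Updates the per-size-bucket rx counters for a single received packet of
 * 'packet_size' bytes. */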
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_dpdk *dev,
                                     struct dp_packet **packets, int count,
                                     int qos_drops)
{
    struct netdev_stats *stats = &dev->stats;
    struct dp_packet *packet;
    unsigned int packet_size;
    int i;

    stats->rx_packets += count;
    stats->rx_dropped += qos_drops;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }

    if (OVS_UNLIKELY(qos_drops)) {
        dev->sw_stats->rx_qos_drops += qos_drops;
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet_batch *batch, int *qfill)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t nb_rx = 0;
    uint16_t qos_drops = 0;
    int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
    int vid = netdev_dpdk_get_vid(dev);

    if (OVS_UNLIKELY(vid < 0 || !dev->vhost_reconfigured
                     || !(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(vid, qid, dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) batch->packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (qfill) {
        if (nb_rx == NETDEV_MAX_BURST) {
            /* The DPDK API returns a uint32_t which often has invalid bits in
             * the upper 16-bits. Need to restrict the value to uint16_t. */
            *qfill = rte_vhost_rx_queue_count(vid, qid) & UINT16_MAX;
        } else {
            *qfill = 0;
        }
    }

    if (policer) {
        qos_drops = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx, true);
        qos_drops -= nb_rx;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(dev, batch->packets,
                                         nb_rx, qos_drops);
    rte_spinlock_unlock(&dev->stats_lock);

    batch->count = nb_rx;
    dp_packet_batch_init_packet_fields(batch);

    return 0;
}
static bool
netdev_dpdk_vhost_rxq_enabled(struct netdev_rxq *rxq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    return dev->vhost_rxq_enabled[rxq->queue_id];
}
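
/* Receive path for physical DPDK ports: bursts up to NETDEV_MAX_BURST
 * packets from the NIC rx queue and applies the ingress policer, if any,
 * before handing the batch to the datapath. */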
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch,
                     int *qfill)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    int nb_rx;
    int dropped = 0;

    if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) batch->packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx, true);
        dropped -= nb_rx;
    }

    /* Update stats to reflect dropped packets */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        dev->sw_stats->rx_qos_drops += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    batch->count = nb_rx;
    dp_packet_batch_init_packet_fields(batch);

    if (qfill) {
        if (nb_rx == NETDEV_MAX_BURST) {
            *qfill = rte_eth_rx_queue_count(rx->port_id, rxq->queue_id);
        } else {
            *qfill = 0;
        }
    }

    return 0;
}
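
/* Runs the configured egress QoS policer, if any, over 'pkts' and returns
 * the number of packets that passed.  With 'should_steal' set, packets that
 * exceed the configured rate are freed by the policer. */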
static int
netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                    int cnt, bool should_steal)
{
    struct qos_conf *qos_conf = ovsrcu_get(struct qos_conf *, &dev->qos_conf);

    if (qos_conf) {
        rte_spinlock_lock(&qos_conf->lock);
        cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt, should_steal);
        rte_spinlock_unlock(&qos_conf->lock);
    }

    return cnt;
}
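
/* Drops packets larger than the device's maximum packet length and compacts
 * 'pkts' in place; returns the number of packets kept. */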
static int
netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                              int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt;

    /* Filter oversized packets, unless they are marked for TSO. */
    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        if (OVS_UNLIKELY((pkt->pkt_len > dev->max_packet_len)
            && !(pkt->ol_flags & PKT_TX_TCP_SEG))) {
            VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " "
                         "max_packet_len %d", dev->up.name, pkt->pkt_len,
                         dev->max_packet_len);
            rte_pktmbuf_free(pkt);
            continue;
        }

        if (OVS_UNLIKELY(i != cnt)) {
            pkts[cnt] = pkt;
        }
        cnt++;
    }

    return cnt;
}
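
/* Folds the per-batch software drop and retry counters in 'sw_stats_add'
 * into the device-wide stats.  Caller must hold 'dev->stats_lock'. */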
static void
netdev_dpdk_vhost_update_tx_counters(struct netdev_dpdk *dev,
                                     struct dp_packet **packets,
                                     int attempted,
                                     struct netdev_dpdk_sw_stats *sw_stats_add)
{
    int dropped = sw_stats_add->tx_mtu_exceeded_drops +
                  sw_stats_add->tx_qos_drops +
                  sw_stats_add->tx_failure_drops +
                  sw_stats_add->tx_invalid_hwol_drops;
    struct netdev_stats *stats = &dev->stats;
    int sent = attempted - dropped;
    int i;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }

    if (OVS_UNLIKELY(dropped || sw_stats_add->tx_retries)) {
        struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;

        sw_stats->tx_retries            += sw_stats_add->tx_retries;
        sw_stats->tx_failure_drops      += sw_stats_add->tx_failure_drops;
        sw_stats->tx_mtu_exceeded_drops += sw_stats_add->tx_mtu_exceeded_drops;
        sw_stats->tx_qos_drops          += sw_stats_add->tx_qos_drops;
        sw_stats->tx_invalid_hwol_drops += sw_stats_add->tx_invalid_hwol_drops;
    }
}
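
/* Common vhost transmit path: maps 'qid' to an enabled virtqueue, applies
 * HWOL preparation, MTU filtering and QoS, then enqueues to the guest with
 * a bounded number of retries.  Takes ownership of 'pkts'. */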
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    struct netdev_dpdk_sw_stats sw_stats_add;
    unsigned int n_packets_to_free = cnt;
    unsigned int total_packets = cnt;
    int i, retries = 0;
    int max_retries = VHOST_ENQ_RETRY_MIN;
    int vid = netdev_dpdk_get_vid(dev);

    qid = dev->tx_q[qid % netdev->n_txq].map;

    if (OVS_UNLIKELY(vid < 0 || !dev->vhost_reconfigured || qid < 0
                     || !(dev->flags & NETDEV_UP))) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
        goto out;
    }

    if (OVS_UNLIKELY(!rte_spinlock_trylock(&dev->tx_q[qid].tx_lock))) {
        COVERAGE_INC(vhost_tx_contention);
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    sw_stats_add.tx_invalid_hwol_drops = cnt;
    if (userspace_tso_enabled()) {
        cnt = netdev_dpdk_prep_hwol_batch(dev, cur_pkts, cnt);
    }

    sw_stats_add.tx_invalid_hwol_drops -= cnt;
    sw_stats_add.tx_mtu_exceeded_drops = cnt;
    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    sw_stats_add.tx_mtu_exceeded_drops -= cnt;

    /* Check if QoS has been configured for the netdev. */
    sw_stats_add.tx_qos_drops = cnt;
    cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
    sw_stats_add.tx_qos_drops -= cnt;

    n_packets_to_free = cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(vid, vhost_qid, cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible retry.*/
            cur_pkts = &cur_pkts[tx_pkts];
            if (OVS_UNLIKELY(cnt && !retries)) {
                /*
                 * Read max retries as there are packets not sent
                 * and no retries have already occurred.
                 */
                atomic_read_relaxed(&dev->vhost_tx_retries_max, &max_retries);
            }
        } else {
            /* No packets sent - do not retry.*/
            break;
        }
    } while (cnt && (retries++ < max_retries));

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    sw_stats_add.tx_failure_drops = cnt;
    sw_stats_add.tx_retries = MIN(retries, max_retries);

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(dev, pkts, total_packets,
                                         &sw_stats_add);
    rte_spinlock_unlock(&dev->stats_lock);

out:
    for (i = 0; i < n_packets_to_free; i++) {
        dp_packet_delete(pkts[i]);
    }
}
static void
netdev_dpdk_extbuf_free(void *addr OVS_UNUSED, void *opaque)
{
    rte_free(opaque);
}

static struct rte_mbuf *
dpdk_pktmbuf_attach_extbuf(struct rte_mbuf *pkt, uint32_t data_len)
{
    uint32_t total_len = RTE_PKTMBUF_HEADROOM + data_len;
    struct rte_mbuf_ext_shared_info *shinfo = NULL;
    uint16_t buf_len;
    void *buf;

    if (rte_pktmbuf_tailroom(pkt) >= sizeof *shinfo) {
        shinfo = rte_pktmbuf_mtod(pkt, struct rte_mbuf_ext_shared_info *);
    } else {
        total_len += sizeof *shinfo + sizeof(uintptr_t);
        total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
    }

    if (OVS_UNLIKELY(total_len > UINT16_MAX)) {
        VLOG_ERR("Can't copy packet: too big %u", total_len);
        return NULL;
    }

    buf_len = total_len;
    buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
    if (OVS_UNLIKELY(buf == NULL)) {
        VLOG_ERR("Failed to allocate memory using rte_malloc: %u", buf_len);
        return NULL;
    }

    /* Initialize shinfo. */
    if (shinfo) {
        shinfo->free_cb = netdev_dpdk_extbuf_free;
        shinfo->fcb_opaque = buf;
        rte_mbuf_ext_refcnt_set(shinfo, 1);
    } else {
        shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
                                                    netdev_dpdk_extbuf_free,
                                                    buf);
        if (OVS_UNLIKELY(shinfo == NULL)) {
            rte_free(buf);
            VLOG_ERR("Failed to initialize shared info for mbuf while "
                     "attempting to attach an external buffer.");
            return NULL;
        }
    }

    rte_pktmbuf_attach_extbuf(pkt, buf, rte_malloc_virt2iova(buf), buf_len,
                              shinfo);
    rte_pktmbuf_reset_headroom(pkt);

    return pkt;
}
static struct rte_mbuf *
dpdk_pktmbuf_alloc(struct rte_mempool *mp, uint32_t data_len)
{
    struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);

    if (OVS_UNLIKELY(!pkt)) {
        return NULL;
    }

    if (rte_pktmbuf_tailroom(pkt) >= data_len) {
        return pkt;
    }

    if (dpdk_pktmbuf_attach_extbuf(pkt, data_len)) {
        return pkt;
    }

    rte_pktmbuf_free(pkt);

    return NULL;
}
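
/* Copies 'pkt_orig' into a newly allocated mbuf from 'mp', preserving the
 * packet metadata and tx offload fields so that e.g. TSO requests survive
 * the copy.  Returns NULL if no suitable mbuf could be allocated. */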
static struct dp_packet *
dpdk_copy_dp_packet_to_mbuf(struct rte_mempool *mp, struct dp_packet *pkt_orig)
{
    struct rte_mbuf *mbuf_dest;
    struct dp_packet *pkt_dest;
    uint32_t pkt_len;

    pkt_len = dp_packet_size(pkt_orig);
    mbuf_dest = dpdk_pktmbuf_alloc(mp, pkt_len);
    if (OVS_UNLIKELY(mbuf_dest == NULL)) {
        return NULL;
    }

    pkt_dest = CONTAINER_OF(mbuf_dest, struct dp_packet, mbuf);
    memcpy(dp_packet_data(pkt_dest), dp_packet_data(pkt_orig), pkt_len);
    dp_packet_set_size(pkt_dest, pkt_len);

    mbuf_dest->tx_offload = pkt_orig->mbuf.tx_offload;
    mbuf_dest->packet_type = pkt_orig->mbuf.packet_type;
    mbuf_dest->ol_flags |= (pkt_orig->mbuf.ol_flags &
                            ~(EXT_ATTACHED_MBUF | IND_ATTACHED_MBUF));

    memcpy(&pkt_dest->l2_pad_size, &pkt_orig->l2_pad_size,
           sizeof(struct dp_packet) - offsetof(struct dp_packet, l2_pad_size));

    if (mbuf_dest->ol_flags & PKT_TX_L4_MASK) {
        mbuf_dest->l2_len = (char *)dp_packet_l3(pkt_dest)
                                - (char *)dp_packet_eth(pkt_dest);
        mbuf_dest->l3_len = (char *)dp_packet_l4(pkt_dest)
                                - (char *) dp_packet_l3(pkt_dest);
    }

    return pkt_dest;
}
/* Tx function.  Copies the packets into freshly allocated DPDK mbufs before
 * transmitting them, for batches that did not originate in DPDK memory. */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    const size_t batch_cnt = dp_packet_batch_size(batch);
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = batch_cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct dp_packet *pkts[PKT_ARRAY_SIZE];
    struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
    uint32_t cnt = batch_cnt;
    uint32_t dropped = 0;
    uint32_t tx_failure = 0;
    uint32_t mtu_drops = 0;
    uint32_t qos_drops = 0;

    if (dev->type != DPDK_DEV_VHOST) {
        /* Check if QoS has been configured for this netdev. */
        cnt = netdev_dpdk_qos_run(dev, (struct rte_mbuf **) batch->packets,
                                  batch_cnt, false);
        qos_drops = batch_cnt - cnt;
    }

    uint32_t txcnt = 0;

    for (uint32_t i = 0; i < cnt; i++) {
        struct dp_packet *packet = batch->packets[i];
        uint32_t size = dp_packet_size(packet);

        if (size > dev->max_packet_len
            && !(packet->mbuf.ol_flags & PKT_TX_TCP_SEG)) {
            VLOG_WARN_RL(&rl, "Too big size %u max_packet_len %d", size,
                         dev->max_packet_len);
            mtu_drops++;
            continue;
        }

        pkts[txcnt] = dpdk_copy_dp_packet_to_mbuf(dev->dpdk_mp->mp, packet);
        if (OVS_UNLIKELY(!pkts[txcnt])) {
            dropped = cnt - i;
            break;
        }

        txcnt++;
    }

    if (OVS_LIKELY(txcnt)) {
        if (dev->type == DPDK_DEV_VHOST) {
            __netdev_dpdk_vhost_send(netdev, qid, pkts, txcnt);
        } else {
            tx_failure += netdev_dpdk_eth_tx_burst(dev, qid,
                                                   (struct rte_mbuf **)pkts,
                                                   txcnt);
        }
    }

    dropped += qos_drops + mtu_drops + tx_failure;
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        sw_stats->tx_failure_drops += tx_failure;
        sw_stats->tx_mtu_exceeded_drops += mtu_drops;
        sw_stats->tx_qos_drops += qos_drops;
        rte_spinlock_unlock(&dev->stats_lock);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                       struct dp_packet_batch *batch,
                       bool concurrent_txq OVS_UNUSED)
{

    if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, true);
    } else {
        __netdev_dpdk_vhost_send(netdev, qid, batch->packets,
                                 dp_packet_batch_size(batch));
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet_batch *batch,
                   bool concurrent_txq)
{
    if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
        dp_packet_delete_batch(batch, true);
        return;
    }

    if (OVS_UNLIKELY(concurrent_txq)) {
        qid = qid % dev->up.n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, true);
    } else {
        struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
        int dropped;
        int tx_failure, mtu_drops, qos_drops, hwol_drops;
        int batch_cnt = dp_packet_batch_size(batch);
        struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;

        hwol_drops = batch_cnt;
        if (userspace_tso_enabled()) {
            batch_cnt = netdev_dpdk_prep_hwol_batch(dev, pkts, batch_cnt);
        }
        hwol_drops -= batch_cnt;
        mtu_drops = batch_cnt;
        batch_cnt = netdev_dpdk_filter_packet_len(dev, pkts, batch_cnt);
        mtu_drops -= batch_cnt;
        qos_drops = batch_cnt;
        batch_cnt = netdev_dpdk_qos_run(dev, pkts, batch_cnt, true);
        qos_drops -= batch_cnt;

        tx_failure = netdev_dpdk_eth_tx_burst(dev, qid, pkts, batch_cnt);

        dropped = tx_failure + mtu_drops + qos_drops + hwol_drops;
        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            sw_stats->tx_failure_drops += tx_failure;
            sw_stats->tx_mtu_exceeded_drops += mtu_drops;
            sw_stats->tx_qos_drops += qos_drops;
            sw_stats->tx_invalid_hwol_drops += hwol_drops;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(concurrent_txq)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet_batch *batch, bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* XXX: Ensure that the overall frame length of the requested MTU does not
     * surpass the NETDEV_DPDK_MAX_PKT_LEN. DPDK device drivers differ in how
     * the L2 frame length is calculated for a given MTU when
     * rte_eth_dev_set_mtu(mtu) is called e.g. i40e driver includes 2 x vlan
     * headers, the em driver includes 1 x vlan header, the ixgbe driver does
     * not include vlan headers. As such we should use
     * MTU_TO_MAX_FRAME_LEN(mtu) which includes an additional 2 x vlan headers
     * (8 bytes) for comparison. This avoids a failure later with
     * rte_eth_dev_set_mtu(). This approach should be used until DPDK provides
     * a method to retrieve the upper bound MTU for a given device.
     */
    if (MTU_TO_MAX_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
        || mtu < RTE_ETHER_MIN_MTU) {
        VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu);
        return EINVAL;
    }

    ovs_mutex_lock(&dev->mutex);
    if (dev->requested_mtu != mtu) {
        dev->requested_mtu = mtu;
        netdev_request_reconfigure(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets = dev->stats.rx_packets;
    stats->tx_packets = dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
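
/* Maps DPDK extended statistics onto the matching 'struct netdev_stats'
 * members by comparing xstat names against a fixed translation table. */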
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstat *xstats,
                           const struct rte_eth_xstat_name *names,
                           const unsigned int size)
{
/* DPDK XSTATS Counter names definition. */
#define DPDK_XSTATS \
    DPDK_XSTAT(multicast,               "rx_multicast_packets"            ) \
    DPDK_XSTAT(tx_multicast_packets,    "tx_multicast_packets"            ) \
    DPDK_XSTAT(rx_broadcast_packets,    "rx_broadcast_packets"            ) \
    DPDK_XSTAT(tx_broadcast_packets,    "tx_broadcast_packets"            ) \
    DPDK_XSTAT(rx_undersized_errors,    "rx_undersized_errors"            ) \
    DPDK_XSTAT(rx_oversize_errors,      "rx_oversize_errors"              ) \
    DPDK_XSTAT(rx_fragmented_errors,    "rx_fragmented_errors"            ) \
    DPDK_XSTAT(rx_jabber_errors,        "rx_jabber_errors"                ) \
    DPDK_XSTAT(rx_1_to_64_packets,      "rx_size_64_packets"              ) \
    DPDK_XSTAT(rx_65_to_127_packets,    "rx_size_65_to_127_packets"       ) \
    DPDK_XSTAT(rx_128_to_255_packets,   "rx_size_128_to_255_packets"      ) \
    DPDK_XSTAT(rx_256_to_511_packets,   "rx_size_256_to_511_packets"      ) \
    DPDK_XSTAT(rx_512_to_1023_packets,  "rx_size_512_to_1023_packets"     ) \
    DPDK_XSTAT(rx_1024_to_1522_packets, "rx_size_1024_to_1522_packets"    ) \
    DPDK_XSTAT(rx_1523_to_max_packets,  "rx_size_1523_to_max_packets"     ) \
    DPDK_XSTAT(tx_1_to_64_packets,      "tx_size_64_packets"              ) \
    DPDK_XSTAT(tx_65_to_127_packets,    "tx_size_65_to_127_packets"       ) \
    DPDK_XSTAT(tx_128_to_255_packets,   "tx_size_128_to_255_packets"      ) \
    DPDK_XSTAT(tx_256_to_511_packets,   "tx_size_256_to_511_packets"      ) \
    DPDK_XSTAT(tx_512_to_1023_packets,  "tx_size_512_to_1023_packets"     ) \
    DPDK_XSTAT(tx_1024_to_1522_packets, "tx_size_1024_to_1522_packets"    ) \
    DPDK_XSTAT(tx_1523_to_max_packets,  "tx_size_1523_to_max_packets"     )

    for (unsigned int i = 0; i < size; i++) {
#define DPDK_XSTAT(MEMBER, NAME)                \
        if (strcmp(NAME, names[i].name) == 0) { \
            stats->MEMBER = xstats[i].value;    \
            continue;                           \
        }
        DPDK_XSTATS;
#undef DPDK_XSTAT
    }
#undef DPDK_XSTATS
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstat *rte_xstats = NULL;
    struct rte_eth_xstat_name *rte_xstats_names = NULL;
    int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: "DPDK_PORT_ID_FMT,
                 dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    /* Get length of statistics */
    rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
    if (rte_xstats_len < 0) {
        VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT,
                  dev->port_id);
        goto out;
    }

    /* Reserve memory for xstats names and values */
    rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
    rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);

    /* Retrieve xstats names */
    rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
                                                  rte_xstats_names,
                                                  rte_xstats_len);
    if (rte_xstats_new_len != rte_xstats_len) {
        VLOG_WARN("Cannot get XSTATS names for port: "DPDK_PORT_ID_FMT,
                  dev->port_id);
        goto out;
    }

    /* Retrieve xstats values */
    memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
    rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                        rte_xstats_len);
    if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
        netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
                                   rte_xstats_len);
    } else {
        VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT,
                  dev->port_id);
    }

out:
    free(rte_xstats);
    free(rte_xstats_names);

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_custom_stats(const struct netdev *netdev,
                             struct netdev_custom_stats *custom_stats)
{
    uint32_t i;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int rte_xstats_ret, sw_stats_size;

    netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);

    ovs_mutex_lock(&dev->mutex);

    if (netdev_dpdk_configure_xstats(dev)) {
        uint64_t *values = xcalloc(dev->rte_xstats_ids_size,
                                   sizeof(uint64_t));

        rte_xstats_ret =
                rte_eth_xstats_get_by_id(dev->port_id, dev->rte_xstats_ids,
                                         values, dev->rte_xstats_ids_size);

        if (rte_xstats_ret > 0 &&
            rte_xstats_ret <= dev->rte_xstats_ids_size) {

            sw_stats_size = custom_stats->size;
            custom_stats->size += rte_xstats_ret;
            custom_stats->counters = xrealloc(custom_stats->counters,
                                              custom_stats->size *
                                              sizeof *custom_stats->counters);

            for (i = 0; i < rte_xstats_ret; i++) {
                ovs_strlcpy(custom_stats->counters[sw_stats_size + i].name,
                            netdev_dpdk_get_xstat_name(dev,
                                                       dev->rte_xstats_ids[i]),
                            NETDEV_CUSTOM_STATS_NAME_SIZE);
                custom_stats->counters[sw_stats_size + i].value = values[i];
            }
        } else {
            VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT,
                      dev->port_id);
            /* Let's clear statistics cache, so it will be
             * reconfigured */
            netdev_dpdk_clear_xstats(dev);
        }

        free(values);
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
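
/* Fills 'custom_stats' with the OVS software counters (tx retries and the
 * various drop categories) kept in 'dev->sw_stats'; counters that read as
 * UINT64_MAX are treated as unavailable and filtered out. */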
static int
netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
                                struct netdev_custom_stats *custom_stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int i, n;

#define SW_CSTATS                    \
    SW_CSTAT(tx_retries)             \
    SW_CSTAT(tx_failure_drops)       \
    SW_CSTAT(tx_mtu_exceeded_drops)  \
    SW_CSTAT(tx_qos_drops)           \
    SW_CSTAT(rx_qos_drops)           \
    SW_CSTAT(tx_invalid_hwol_drops)

#define SW_CSTAT(NAME) + 1
    custom_stats->size = SW_CSTATS;
#undef SW_CSTAT

    custom_stats->counters = xcalloc(custom_stats->size,
                                     sizeof *custom_stats->counters);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    i = 0;
#define SW_CSTAT(NAME) \
    custom_stats->counters[i++].value = dev->sw_stats->NAME;
    SW_CSTATS;
#undef SW_CSTAT
    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    i = 0;
    n = 0;
#define SW_CSTAT(NAME)                                                     \
    if (custom_stats->counters[i].value != UINT64_MAX) {                   \
        ovs_strlcpy(custom_stats->counters[n].name,                        \
                    "ovs_"#NAME, NETDEV_CUSTOM_STATS_NAME_SIZE);           \
        custom_stats->counters[n].value = custom_stats->counters[i].value; \
        n++;                                                               \
    }                                                                      \
    i++;
    SW_CSTATS;
#undef SW_CSTAT

    custom_stats->size = n;
    return 0;
}
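
/* Reports the link speed, duplex and autoneg state of the port as OpenFlow
 * NETDEV_F_* features; speeds without an OpenFlow equivalent are reported
 * as NETDEV_F_OTHER. */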
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised,
                         enum netdev_features *supported,
                         enum netdev_features *peer)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;
    uint32_t feature = 0;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    /* Match against OpenFlow defined link speed values. */
    if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        switch (link.link_speed) {
        case ETH_SPEED_NUM_10M:
            feature |= NETDEV_F_10MB_FD;
            break;
        case ETH_SPEED_NUM_100M:
            feature |= NETDEV_F_100MB_FD;
            break;
        case ETH_SPEED_NUM_1G:
            feature |= NETDEV_F_1GB_FD;
            break;
        case ETH_SPEED_NUM_10G:
            feature |= NETDEV_F_10GB_FD;
            break;
        case ETH_SPEED_NUM_40G:
            feature |= NETDEV_F_40GB_FD;
            break;
        case ETH_SPEED_NUM_100G:
            feature |= NETDEV_F_100GB_FD;
            break;
        default:
            feature |= NETDEV_F_OTHER;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        switch (link.link_speed) {
        case ETH_SPEED_NUM_10M:
            feature |= NETDEV_F_10MB_HD;
            break;
        case ETH_SPEED_NUM_100M:
            feature |= NETDEV_F_100MB_HD;
            break;
        case ETH_SPEED_NUM_1G:
            feature |= NETDEV_F_1GB_HD;
            break;
        default:
            feature |= NETDEV_F_OTHER;
        }
    }

    if (link.link_autoneg) {
        feature |= NETDEV_F_AUTONEG;
    }

    *current = feature;
    *advertised = *supported = *peer = 0;

    return 0;
}
static struct ingress_policer *
netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
{
    struct ingress_policer *policer = NULL;
    uint64_t rate_bytes;
    uint64_t burst_bytes;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    rte_spinlock_init(&policer->policer_lock);

    /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
    rate_bytes = rate * 1000ULL / 8;
    burst_bytes = burst * 1000ULL / 8;

    policer->app_srtcm_params.cir = rate_bytes;
    policer->app_srtcm_params.cbs = burst_bytes;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_profile_config(&policer->in_prof,
                                         &policer->app_srtcm_params);
    if (!err) {
        err = rte_meter_srtcm_config(&policer->in_policer,
                                     &policer->in_prof);
    }
    if (err) {
        VLOG_ERR("Could not create rte meter for ingress policer");
        free(policer);
        return NULL;
    }

    return policer;
}
static int
netdev_dpdk_set_policing(struct netdev *netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;

    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value.
     */
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
                     : policer_burst);

    ovs_mutex_lock(&dev->mutex);

    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);

    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    }

    /* Destroy any existing ingress policer for the device if one exists */
    if (policer) {
        ovsrcu_postpone(free, policer);
    }

    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    } else {
        policer = NULL;
    }
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    /* Calculate hash from the netdev name. Ensure that ifindex is a 24-bit
     * positive integer to meet RFC 2863 recommendations.
     */
    int ifindex = hash_string(netdev->name, 0) % 0xfffffe + 1;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {

        if ((dev->flags ^ *old_flagsp) & NETDEV_UP) {
            int err;

            if (dev->flags & NETDEV_UP) {
                err = rte_eth_dev_set_link_up(dev->port_id);
            } else {
                err = rte_eth_dev_set_link_down(dev->port_id);
            }
            if (err == -ENOTSUP) {
                VLOG_INFO("Interface %s does not support link state "
                          "configuration", netdev_get_name(&dev->up));
            } else if (err < 0) {
                VLOG_ERR("Interface %s link change error: %s",
                         netdev_get_name(&dev->up), rte_strerror(-err));
                dev->flags = *old_flagsp;
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        netdev_change_seq_changed(&dev->up);
    } else {
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
         * update. */

        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(dev)) {
            netdev_change_seq_changed(&dev->up);

            /* Clear statistics if device is getting up. */
            if (NETDEV_UP & on) {
                rte_spinlock_lock(&dev->stats_lock);
                memset(&dev->stats, 0, sizeof dev->stats);
                rte_spinlock_unlock(&dev->stats_lock);
            }
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_vhost_user_get_status(const struct netdev *netdev,
                                  struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    bool client_mode = dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT;
    smap_add_format(args, "mode", "%s", client_mode ? "client" : "server");

    int vid = netdev_dpdk_get_vid(dev);
    if (vid < 0) {
        smap_add_format(args, "status", "disconnected");
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    } else {
        smap_add_format(args, "status", "connected");
    }

    char socket_name[PATH_MAX];
    if (!rte_vhost_get_ifname(vid, socket_name, PATH_MAX)) {
        smap_add_format(args, "socket", "%s", socket_name);
    }

    uint64_t features;
    if (!rte_vhost_get_negotiated_features(vid, &features)) {
        smap_add_format(args, "features", "0x%016"PRIx64, features);
    }

    uint16_t mtu;
    if (!rte_vhost_get_mtu(vid, &mtu)) {
        smap_add_format(args, "mtu", "%d", mtu);
    }

    int numa = rte_vhost_get_numa_node(vid);
    if (numa >= 0) {
        smap_add_format(args, "numa", "%d", numa);
    }

    uint16_t vring_num = rte_vhost_get_vring_num(vid);
    if (vring_num) {
        smap_add_format(args, "num_of_vrings", "%d", vring_num);
    }

    for (int i = 0; i < vring_num; i++) {
        struct rte_vhost_vring vring;

        rte_vhost_get_vhost_vring(vid, i, &vring);
        smap_add_nocopy(args, xasprintf("vring_%d_size", i),
                        xasprintf("%d", vring.size));
    }

    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
/*
 * Convert a given uint32_t link speed defined in DPDK to a string
 * representation.
 */
static const char *
netdev_dpdk_link_speed_to_str__(uint32_t link_speed)
{
    switch (link_speed) {
    case ETH_SPEED_NUM_10M:    return "10Mbps";
    case ETH_SPEED_NUM_100M:   return "100Mbps";
    case ETH_SPEED_NUM_1G:     return "1Gbps";
    case ETH_SPEED_NUM_2_5G:   return "2.5Gbps";
    case ETH_SPEED_NUM_5G:     return "5Gbps";
    case ETH_SPEED_NUM_10G:    return "10Gbps";
    case ETH_SPEED_NUM_20G:    return "20Gbps";
    case ETH_SPEED_NUM_25G:    return "25Gbps";
    case ETH_SPEED_NUM_40G:    return "40Gbps";
    case ETH_SPEED_NUM_50G:    return "50Gbps";
    case ETH_SPEED_NUM_56G:    return "56Gbps";
    case ETH_SPEED_NUM_100G:   return "100Gbps";
    default:                   return "Not Defined";
    }
}
*netdev
, struct smap
*args
)
3655 struct netdev_dpdk
*dev
= netdev_dpdk_cast(netdev
);
3656 struct rte_eth_dev_info dev_info
;
3657 uint32_t link_speed
;
3659 if (!rte_eth_dev_is_valid_port(dev
->port_id
)) {
3663 ovs_mutex_lock(&dpdk_mutex
);
3664 ovs_mutex_lock(&dev
->mutex
);
3665 rte_eth_dev_info_get(dev
->port_id
, &dev_info
);
3666 link_speed
= dev
->link
.link_speed
;
3667 ovs_mutex_unlock(&dev
->mutex
);
3668 const struct rte_bus
*bus
;
3669 const struct rte_pci_device
*pci_dev
;
3670 uint16_t vendor_id
= PCI_ANY_ID
;
3671 uint16_t device_id
= PCI_ANY_ID
;
3672 bus
= rte_bus_find_by_device(dev_info
.device
);
3673 if (bus
&& !strcmp(bus
->name
, "pci")) {
3674 pci_dev
= RTE_DEV_TO_PCI(dev_info
.device
);
3676 vendor_id
= pci_dev
->id
.vendor_id
;
3677 device_id
= pci_dev
->id
.device_id
;
3680 ovs_mutex_unlock(&dpdk_mutex
);
3682 smap_add_format(args
, "port_no", DPDK_PORT_ID_FMT
, dev
->port_id
);
3683 smap_add_format(args
, "numa_id", "%d",
3684 rte_eth_dev_socket_id(dev
->port_id
));
3685 smap_add_format(args
, "driver_name", "%s", dev_info
.driver_name
);
3686 smap_add_format(args
, "min_rx_bufsize", "%u", dev_info
.min_rx_bufsize
);
3687 smap_add_format(args
, "max_rx_pktlen", "%u", dev
->max_packet_len
);
3688 smap_add_format(args
, "max_rx_queues", "%u", dev_info
.max_rx_queues
);
3689 smap_add_format(args
, "max_tx_queues", "%u", dev_info
.max_tx_queues
);
3690 smap_add_format(args
, "max_mac_addrs", "%u", dev_info
.max_mac_addrs
);
3691 smap_add_format(args
, "max_hash_mac_addrs", "%u",
3692 dev_info
.max_hash_mac_addrs
);
3693 smap_add_format(args
, "max_vfs", "%u", dev_info
.max_vfs
);
3694 smap_add_format(args
, "max_vmdq_pools", "%u", dev_info
.max_vmdq_pools
);
3696 /* Querying the DPDK library for iftype may be done in future, pending
3697 * support; cf. RFC 3635 Section 3.2.4. */
3698 enum { IF_TYPE_ETHERNETCSMACD
= 6 };
3700 smap_add_format(args
, "if_type", "%"PRIu32
, IF_TYPE_ETHERNETCSMACD
);
3701 smap_add_format(args
, "if_descr", "%s %s", rte_version(),
3702 dev_info
.driver_name
);
3703 smap_add_format(args
, "pci-vendor_id", "0x%x", vendor_id
);
3704 smap_add_format(args
, "pci-device_id", "0x%x", device_id
);
3706 /* Not all link speeds are defined in the OpenFlow specs e.g. 25 Gbps.
3707 * In that case the speed will not be reported as part of the usual
3708 * call to get_features(). Get the link speed of the device and add it
3709 * to the device status in an easy to read string format.
3711 smap_add(args
, "link_speed",
3712 netdev_dpdk_link_speed_to_str__(link_speed
));
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if ( !strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);

        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dev->mutex);
            netdev_dpdk_set_admin_state__(dev, up);
            ovs_mutex_unlock(&dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *dev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            netdev_dpdk_set_admin_state__(dev, up);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
static void
netdev_dpdk_detach(struct unixctl_conn *conn, int argc OVS_UNUSED,
                   const char *argv[], void *aux OVS_UNUSED)
{
    char *response;
    dpdk_port_t port_id;
    struct netdev_dpdk *dev;
    struct rte_device *rte_dev;
    struct ds used_interfaces = DS_EMPTY_INITIALIZER;
    bool used = false;

    ovs_mutex_lock(&dpdk_mutex);

    port_id = netdev_dpdk_get_port_by_devargs(argv[1]);
    if (!rte_eth_dev_is_valid_port(port_id)) {
        response = xasprintf("Device '%s' not found in DPDK", argv[1]);
        goto error;
    }

    rte_dev = rte_eth_devices[port_id].device;
    ds_put_format(&used_interfaces,
                  "Device '%s' is being used by the following interfaces:",
                  argv[1]);

    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        /* FIXME: avoid direct access to DPDK array rte_eth_devices. */
        if (rte_eth_devices[dev->port_id].device == rte_dev
            && rte_eth_devices[dev->port_id].state != RTE_ETH_DEV_UNUSED) {
            used = true;
            ds_put_format(&used_interfaces, " %s",
                          netdev_get_name(&dev->up));
        }
    }

    if (used) {
        ds_put_cstr(&used_interfaces, ". Remove them before detaching.");
        response = ds_steal_cstr(&used_interfaces);
        ds_destroy(&used_interfaces);
        goto error;
    }
    ds_destroy(&used_interfaces);

    rte_eth_dev_close(port_id);
    if (rte_dev_remove(rte_dev) < 0) {
        response = xasprintf("Device '%s' can not be detached", argv[1]);
        goto error;
    }

    response = xasprintf("All devices shared with device '%s' "
                         "have been detached", argv[1]);

    ovs_mutex_unlock(&dpdk_mutex);
    unixctl_command_reply(conn, response);
    free(response);
    return;

error:
    ovs_mutex_unlock(&dpdk_mutex);
    unixctl_command_reply_error(conn, response);
    free(response);
}
static void
netdev_dpdk_get_mempool_info(struct unixctl_conn *conn,
                             int argc, const char *argv[],
                             void *aux OVS_UNUSED)
{
    size_t size;
    FILE *stream;
    char *response = NULL;
    struct netdev *netdev = NULL;

    if (argc == 2) {
        netdev = netdev_from_name(argv[1]);
        if (!netdev || !is_dpdk_class(netdev->netdev_class)) {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            goto out;
        }
    }

    stream = open_memstream(&response, &size);
    if (!stream) {
        response = xasprintf("Unable to open memstream: %s.",
                             ovs_strerror(errno));
        unixctl_command_reply_error(conn, response);
        goto out;
    }

    if (netdev) {
        struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

        ovs_mutex_lock(&dev->mutex);
        ovs_mutex_lock(&dpdk_mp_mutex);

        rte_mempool_dump(stream, dev->dpdk_mp->mp);

        ovs_mutex_unlock(&dpdk_mp_mutex);
        ovs_mutex_unlock(&dev->mutex);
    } else {
        ovs_mutex_lock(&dpdk_mp_mutex);
        rte_mempool_list_dump(stream);
        ovs_mutex_unlock(&dpdk_mp_mutex);
    }

    fclose(stream);

    unixctl_command_reply(conn, response);
out:
    free(response);
    netdev_close(netdev);
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(int vid)
{
    uint32_t i;

    for (i = 0; i < rte_vhost_get_vring_num(vid); i++) {
        rte_vhost_enable_guest_notification(vid, i, 0);
    }
}
/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->up.n_txq;

    enabled_queues = xcalloc(total_txqs, sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds mapping = DS_EMPTY_INITIALIZER;

        ds_put_format(&mapping, "TX queue mapping for port '%s':\n",
                      netdev_get_name(&dev->up));
        for (i = 0; i < total_txqs; i++) {
            ds_put_format(&mapping, "%2d --> %2d\n", i, dev->tx_q[i].map);
        }

        VLOG_DBG("%s", ds_cstr(&mapping));
        ds_destroy(&mapping);
    }

    free(enabled_queues);
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int newnode = 0;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (nullable_string_is_equal(ifname, dev->vhost_id)) {
            uint32_t qp_num = rte_vhost_get_vring_num(vid) / VIRTIO_QNUM;

            /* Get NUMA information */
            newnode = rte_vhost_get_numa_node(vid);
            if (newnode == -1) {
#ifdef VHOST_NUMA
                VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
                          ifname);
#endif
                newnode = dev->socket_id;
            }

            if (dev->requested_n_txq < qp_num
                || dev->requested_n_rxq < qp_num
                || dev->requested_socket_id != newnode) {
                dev->requested_socket_id = newnode;
                dev->requested_n_rxq = qp_num;
                dev->requested_n_txq = qp_num;
                netdev_request_reconfigure(&dev->up);
            } else {
                /* Reconfiguration not required. */
                dev->vhost_reconfigured = true;
            }

            ovsrcu_index_set(&dev->vid, vid);
            exists = true;

            /* Disable notifications. */
            set_irq_status(vid);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' has been added on numa node %i",
              ifname, newnode);

    return 0;
}
/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->up.n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_vid(dev) == vid) {

            ovs_mutex_lock(&dev->mutex);
            dev->vhost_reconfigured = false;
            ovsrcu_index_set(&dev->vid, -1);
            memset(dev->vhost_rxq_enabled, 0,
                   dev->up.n_rxq * sizeof *dev->vhost_rxq_enabled);
            netdev_dpdk_txq_map_clear(dev);

            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            exists = true;
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' has been removed", ifname);
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
    }
}
static int
vring_state_changed(int vid, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;
    bool is_rx = (queue_id % VIRTIO_QNUM) == VIRTIO_TXQ;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (nullable_string_is_equal(ifname, dev->vhost_id)) {
            if (is_rx) {
                bool old_state = dev->vhost_rxq_enabled[qid];

                dev->vhost_rxq_enabled[qid] = enable != 0;
                if (old_state != dev->vhost_rxq_enabled[qid]) {
                    netdev_change_seq_changed(&dev->up);
                }
            } else {
                if (enable) {
                    dev->tx_q[qid].map = qid;
                } else {
                    dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
                }
                netdev_dpdk_remap_txqs(dev);
            }
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( %s_qid %d ) of vhost device '%s' "
                  "changed to \'%s\'", queue_id, is_rx == true ? "rx" : "tx",
                  qid, ifname, (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
        return -1;
    }

    return 0;
}
static int
destroy_connection(int vid)
{
    struct netdev_dpdk *dev;
    char ifname[IF_NAME_SZ];
    bool exists = false;

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (nullable_string_is_equal(ifname, dev->vhost_id)) {
            uint32_t qp_num = NR_QUEUE;

            if (netdev_dpdk_get_vid(dev) >= 0) {
                VLOG_ERR("Connection on socket '%s' destroyed while vhost "
                         "device still attached.", dev->vhost_id);
            }

            /* Restore the number of queue pairs to default. */
            if (dev->requested_n_txq != qp_num
                || dev->requested_n_rxq != qp_num) {
                dev->requested_n_rxq = qp_num;
                dev->requested_n_txq = qp_num;
                netdev_request_reconfigure(&dev->up);
            }
            ovs_mutex_unlock(&dev->mutex);
            exists = true;
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("vHost Device '%s' connection has been destroyed", ifname);
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
    }

    return 0;
}
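
/* Called by the vhost library each time the guest is notified; only used
 * here to account notifications in a coverage counter. */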
static void
vhost_guest_notified(int vid OVS_UNUSED)
{
    COVERAGE_INC(vhost_notification);
}
/*
 * Retrieve the DPDK virtio device ID (vid) associated with a vhostuser
 * or vhostuserclient netdev.
 *
 * Returns a value greater than or equal to zero for a valid vid or '-1' if
 * there is no valid vid associated.  A vid of '-1' must not be used in
 * rte_vhost_ API calls.
 *
 * Once obtained and validated, a vid can be used by a PMD for multiple
 * subsequent rte_vhost API calls until the PMD quiesces.  A PMD should
 * not fetch the vid again for each of a series of API calls.
 */
int
netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
{
    return ovsrcu_index_get(&dev->vid);
}
struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
}
static int
netdev_dpdk_class_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    /* This function can be called for different classes.  The initialization
     * needs to be done only once */
    if (ovsthread_once_start(&once)) {
        int ret;

        ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
        unixctl_command_register("netdev-dpdk/set-admin-state",
                                 "[netdev] up|down", 1, 2,
                                 netdev_dpdk_set_admin_state, NULL);

        unixctl_command_register("netdev-dpdk/detach",
                                 "pci address of device", 1, 1,
                                 netdev_dpdk_detach, NULL);

        unixctl_command_register("netdev-dpdk/get-mempool-info",
                                 "[netdev]", 0, 1,
                                 netdev_dpdk_get_mempool_info, NULL);

        ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
                                            RTE_ETH_EVENT_INTR_RESET,
                                            dpdk_eth_event_callback, NULL);
        if (ret != 0) {
            VLOG_ERR("Ethernet device callback register error: %s",
                     rte_strerror(-ret));
        }

        ovsthread_once_done(&once);
    }

    return 0;
}
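
/* Creates the rx/tx rte_rings backing a 'dpdkr' port and registers the
 * resulting ring-based ethdev, returning its port id in 'eth_port_id'. */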
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 dpdk_port_t *eth_port_id)
{
    struct dpdk_ring *ring_pair;
    char *ring_name;
    int port_id;

    ring_pair = dpdk_rte_mzalloc(sizeof *ring_pair);
    if (!ring_pair) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    ring_name = xasprintf("%s_tx", dev_name);

    /* Create single producer tx ring, netdev does explicit locking. */
    ring_pair->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                          RING_F_SP_ENQ);
    free(ring_name);
    if (ring_pair->cring_tx == NULL) {
        rte_free(ring_pair);
        return ENOMEM;
    }

    ring_name = xasprintf("%s_rx", dev_name);

    /* Create single consumer rx ring, netdev does explicit locking. */
    ring_pair->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                          RING_F_SC_DEQ);
    free(ring_name);
    if (ring_pair->cring_rx == NULL) {
        rte_free(ring_pair);
        return ENOMEM;
    }

    port_id = rte_eth_from_rings(dev_name, &ring_pair->cring_rx, 1,
                                 &ring_pair->cring_tx, 1, SOCKET0);
    if (port_id < 0) {
        rte_free(ring_pair);
        return ENODEV;
    }

    ring_pair->user_port_id = port_no;
    ring_pair->eth_port_id = port_id;
    *eth_port_id = port_id;

    ovs_list_push_back(&dpdk_ring_list, &ring_pair->list_node);

    return 0;
}
static int
dpdk_ring_open(const char dev_name[], dpdk_port_t *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ring_pair;
    unsigned int port_no;
    int err;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device */
    LIST_FOR_EACH (ring_pair, list_node, &dpdk_ring_list) {
        if (ring_pair->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            /* Really all that is needed */
            *eth_port_id = ring_pair->eth_port_id;
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
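
/* A dpdkr port is added like any other OVS port; the "dpdkr" prefix is what
 * dpdk_dev_parse_name() above keys on (example adapted from the OVS ring
 * port documentation; the bridge and port names are illustrative):
 *
 *     $ ovs-vsctl add-port br0 dpdkr0 -- set Interface dpdkr0 type=dpdkr
 */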
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
                      struct dp_packet_batch *batch, bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct dp_packet *packet;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the offload fields are clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and returned into the datapath
     * without recalculating the RSS hash or revalidating the checksums. */
    DP_PACKET_BATCH_FOR_EACH (i, packet, batch) {
        dp_packet_reset_offload(packet);
    }

    netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    dpdk_port_t port_no = 0;
    int err = 0;

    VLOG_WARN_ONCE("dpdkr a.k.a. ring ports are considered deprecated.  "
                   "Please migrate to virtio-based interfaces, e.g. "
                   "dpdkvhostuserclient ports, net_virtio_user DPDK vdev.");

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = common_construct(netdev, port_no, DPDK_DEV_ETH,
                           rte_eth_dev_socket_id(port_no));

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
    rte_spinlock_init(&conf->lock);
}
/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations qos_name to name.  Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match was found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }
    return NULL;
}
static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }

    return 0;
}
static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf) {
        *typep = qos_conf->ops->qos_name;
        error = (qos_conf->ops->qos_get
                 ? qos_conf->ops->qos_get(qos_conf, details) : 0);
    } else {
        /* No QoS configuration set, return an empty string */
        *typep = "";
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_set_qos(struct netdev *netdev, const char *type,
                    const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    struct qos_conf *qos_conf, *new_qos_conf = NULL;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);

    new_ops = qos_lookup_name(type);

    if (!new_ops || !new_ops->qos_construct) {
        new_qos_conf = NULL;
        if (type && type[0]) {
            error = EOPNOTSUPP;
        }
    } else if (qos_conf && qos_conf->ops == new_ops
               && qos_conf->ops->qos_is_equal(qos_conf, details)) {
        new_qos_conf = qos_conf;
    } else {
        error = new_ops->qos_construct(details, &new_qos_conf);
    }

    if (error) {
        VLOG_ERR("Failed to set QoS type %s on port %s: %s",
                 type, netdev->name, rte_strerror(error));
    }

    if (new_qos_conf != qos_conf) {
        ovsrcu_set(&dev->qos_conf, new_qos_conf);
        if (qos_conf) {
            ovsrcu_postpone(qos_conf->ops->qos_destruct, qos_conf);
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
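
/* Note on the RCU pattern above: the writer publishes the new configuration
 * with ovsrcu_set() and defers freeing the old one with ovsrcu_postpone(),
 * so PMD threads still reading the old qos_conf never see it freed under
 * them.  A reader-side sketch (illustrative only):
 *
 *     struct qos_conf *conf = ovsrcu_get(struct qos_conf *, &dev->qos_conf);
 *
 *     if (conf) {
 *         cnt = conf->ops->qos_run(conf, pkts, cnt, should_steal);
 *     }
 */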
static int
netdev_dpdk_get_queue(const struct netdev *netdev, uint32_t queue_id,
                      struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (!qos_conf || !qos_conf->ops || !qos_conf->ops->qos_queue_get) {
        error = EOPNOTSUPP;
    } else {
        error = qos_conf->ops->qos_queue_get(details, queue_id, qos_conf);
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_set_queue(struct netdev *netdev, uint32_t queue_id,
                      const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (!qos_conf || !qos_conf->ops || !qos_conf->ops->qos_queue_construct) {
        error = EOPNOTSUPP;
    } else {
        error = qos_conf->ops->qos_queue_construct(details, queue_id,
                                                   qos_conf);
    }

    if (error && error != EOPNOTSUPP) {
        VLOG_ERR("Failed to set QoS queue %d on port %s: %s",
                 queue_id, netdev_get_name(netdev), rte_strerror(error));
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_delete_queue(struct netdev *netdev, uint32_t queue_id)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_destruct) {
        qos_conf->ops->qos_queue_destruct(qos_conf, queue_id);
    } else {
        error = EOPNOTSUPP;
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_get_queue_stats(const struct netdev *netdev, uint32_t queue_id,
                            struct netdev_queue_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_get_stats) {
        qos_conf->ops->qos_queue_get_stats(qos_conf, queue_id, stats);
    } else {
        error = EOPNOTSUPP;
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_queue_dump_start(const struct netdev *netdev, void **statep)
{
    int error = 0;
    struct qos_conf *qos_conf;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf && qos_conf->ops
        && qos_conf->ops->qos_queue_dump_state_init) {
        struct netdev_dpdk_queue_state *state;

        *statep = state = xmalloc(sizeof *state);
        error = qos_conf->ops->qos_queue_dump_state_init(qos_conf, state);
    } else {
        error = EOPNOTSUPP;
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_queue_dump_next(const struct netdev *netdev, void *state_,
                            uint32_t *queue_idp, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct netdev_dpdk_queue_state *state = state_;
    struct qos_conf *qos_conf;
    int error = EOF;

    ovs_mutex_lock(&dev->mutex);

    while (state->cur_queue < state->n_queues) {
        uint32_t queue_id = state->queues[state->cur_queue++];

        qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
        if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_get) {
            *queue_idp = queue_id;
            error = qos_conf->ops->qos_queue_get(details, queue_id, qos_conf);
            break;
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
                            void *state_)
{
    struct netdev_dpdk_queue_state *state = state_;

    free(state->queues);
    free(state);
    return 0;
}
/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
    struct rte_meter_srtcm_profile egress_prof;
};
static void
egress_policer_details_to_param(const struct smap *details,
                                struct rte_meter_srtcm_params *params)
{
    memset(params, 0, sizeof *params);
    params->cir = smap_get_ullong(details, "cir", 0);
    params->cbs = smap_get_ullong(details, "cbs", 0);
    params->ebs = 0;
}
static int
egress_policer_qos_construct(const struct smap *details,
                             struct qos_conf **conf)
{
    struct egress_policer *policer;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    egress_policer_details_to_param(details, &policer->app_srtcm_params);
    err = rte_meter_srtcm_profile_config(&policer->egress_prof,
                                         &policer->app_srtcm_params);
    if (!err) {
        err = rte_meter_srtcm_config(&policer->egress_meter,
                                     &policer->egress_prof);
    }

    if (!err) {
        *conf = &policer->qos_conf;
    } else {
        VLOG_ERR("Could not create rte meter for egress policer");
        free(policer);
        *conf = NULL;
        err = -err;
    }

    return err;
}
static void
egress_policer_qos_destruct(struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}
static int
egress_policer_qos_get(const struct qos_conf *conf, struct smap *details)
{
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);

    smap_add_format(details, "cir", "%"PRIu64, policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%"PRIu64, policer->app_srtcm_params.cbs);

    return 0;
}
static bool
egress_policer_qos_is_equal(const struct qos_conf *conf,
                            const struct smap *details)
{
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);
    struct rte_meter_srtcm_params params;

    egress_policer_details_to_param(details, &params);

    return !memcmp(&params, &policer->app_srtcm_params, sizeof params);
}
static int
egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt,
                   bool should_steal)
{
    int cnt = 0;
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);

    cnt = srtcm_policer_run_single_packet(&policer->egress_meter,
                                          &policer->egress_prof, pkts,
                                          pkt_cnt, should_steal);

    return cnt;
}
static const struct dpdk_qos_ops egress_policer_ops = {
    .qos_name = "egress-policer",    /* qos_name */
    .qos_construct = egress_policer_qos_construct,
    .qos_destruct = egress_policer_qos_destruct,
    .qos_get = egress_policer_qos_get,
    .qos_is_equal = egress_policer_qos_is_equal,
    .qos_run = egress_policer_run
};
/* trtcm-policer details */

struct trtcm_policer {
    struct qos_conf qos_conf;
    struct rte_meter_trtcm_rfc4115_params meter_params;
    struct rte_meter_trtcm_rfc4115_profile meter_profile;
    struct rte_meter_trtcm_rfc4115 meter;
    struct netdev_queue_stats stats;
    struct hmap queues;
};

struct trtcm_policer_queue {
    struct hmap_node hmap_node;
    uint32_t queue_id;
    struct rte_meter_trtcm_rfc4115_params meter_params;
    struct rte_meter_trtcm_rfc4115_profile meter_profile;
    struct rte_meter_trtcm_rfc4115 meter;
    struct netdev_queue_stats stats;
};
static void
trtcm_policer_details_to_param(const struct smap *details,
                               struct rte_meter_trtcm_rfc4115_params *params)
{
    memset(params, 0, sizeof *params);
    params->cir = smap_get_ullong(details, "cir", 0);
    params->eir = smap_get_ullong(details, "eir", 0);
    params->cbs = smap_get_ullong(details, "cbs", 0);
    params->ebs = smap_get_ullong(details, "ebs", 0);
}
static void
trtcm_policer_param_to_detail(
    const struct rte_meter_trtcm_rfc4115_params *params,
    struct smap *details)
{
    smap_add_format(details, "cir", "%"PRIu64, params->cir);
    smap_add_format(details, "eir", "%"PRIu64, params->eir);
    smap_add_format(details, "cbs", "%"PRIu64, params->cbs);
    smap_add_format(details, "ebs", "%"PRIu64, params->ebs);
}
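
/* Example configuration with a per-queue override (adapted from the OVS
 * DPDK QoS documentation; names and rates are illustrative).  Queue 0 acts
 * as the default queue for packets whose skb_priority matches no queue:
 *
 *     $ ovs-vsctl set port dpdk1 qos=@myqos -- \
 *         --id=@myqos create qos type=trtcm-policer \
 *         other-config:cir=52000 other-config:cbs=2048 \
 *         other-config:eir=52000 other-config:ebs=2048 \
 *         queues:0=@q0 -- \
 *         --id=@q0 create queue other-config:cir=41600000 \
 *         other-config:cbs=2048 other-config:eir=0 other-config:ebs=0
 */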
static int
trtcm_policer_qos_construct(const struct smap *details,
                            struct qos_conf **conf)
{
    struct trtcm_policer *policer;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &trtcm_policer_ops);
    trtcm_policer_details_to_param(details, &policer->meter_params);
    err = rte_meter_trtcm_rfc4115_profile_config(&policer->meter_profile,
                                                 &policer->meter_params);
    if (!err) {
        err = rte_meter_trtcm_rfc4115_config(&policer->meter,
                                             &policer->meter_profile);
    }

    if (!err) {
        *conf = &policer->qos_conf;
        memset(&policer->stats, 0, sizeof policer->stats);
        hmap_init(&policer->queues);
    } else {
        free(policer);
        *conf = NULL;
        err = -err;
    }

    return err;
}
static void
trtcm_policer_qos_destruct(struct qos_conf *conf)
{
    struct trtcm_policer_queue *queue, *next_queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    HMAP_FOR_EACH_SAFE (queue, next_queue, hmap_node, &policer->queues) {
        hmap_remove(&policer->queues, &queue->hmap_node);
        free(queue);
    }

    hmap_destroy(&policer->queues);
    free(policer);
}
static int
trtcm_policer_qos_get(const struct qos_conf *conf, struct smap *details)
{
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    trtcm_policer_param_to_detail(&policer->meter_params, details);
    return 0;
}
static bool
trtcm_policer_qos_is_equal(const struct qos_conf *conf,
                           const struct smap *details)
{
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);
    struct rte_meter_trtcm_rfc4115_params params;

    trtcm_policer_details_to_param(details, &params);

    return !memcmp(&params, &policer->meter_params, sizeof params);
}
static struct trtcm_policer_queue *
trtcm_policer_qos_find_queue(struct trtcm_policer *policer, uint32_t queue_id)
{
    struct trtcm_policer_queue *queue;
    HMAP_FOR_EACH_WITH_HASH (queue, hmap_node, hash_2words(queue_id, 0),
                             &policer->queues) {
        if (queue->queue_id == queue_id) {
            return queue;
        }
    }
    return NULL;
}
static bool
trtcm_policer_run_single_packet(struct trtcm_policer *policer,
                                struct rte_mbuf *pkt, uint64_t time)
{
    enum rte_color pkt_color;
    struct trtcm_policer_queue *queue;
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct rte_ether_hdr);
    struct dp_packet *dpkt = CONTAINER_OF(pkt, struct dp_packet, mbuf);

    queue = trtcm_policer_qos_find_queue(policer, dpkt->md.skb_priority);
    if (!queue) {
        /* If no queue is found, use the default queue, which MUST exist. */
        queue = trtcm_policer_qos_find_queue(policer, 0);
        if (!queue) {
            return false;
        }
    }

    pkt_color = rte_meter_trtcm_rfc4115_color_blind_check(&queue->meter,
                                                          &queue->meter_profile,
                                                          time,
                                                          pkt_len);

    if (pkt_color == RTE_COLOR_RED) {
        queue->stats.tx_errors++;
    } else {
        queue->stats.tx_bytes += pkt_len;
        queue->stats.tx_packets++;
    }

    pkt_color = rte_meter_trtcm_rfc4115_color_aware_check(&policer->meter,
                                                     &policer->meter_profile,
                                                     time, pkt_len,
                                                     pkt_color);

    if (pkt_color == RTE_COLOR_RED) {
        policer->stats.tx_errors++;
        return false;
    }

    policer->stats.tx_bytes += pkt_len;
    policer->stats.tx_packets++;
    return true;
}
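
/* Summary of the two-stage check above (descriptive only): the packet is
 * first metered color-blind against its queue's trTCM, and the resulting
 * color is then fed color-aware into the port-level trTCM.  Since a
 * color-aware meter never raises a packet's color, a packet marked RED at
 * the queue level stays RED; the packet is transmitted only if the
 * port-level stage leaves it non-RED. */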
static int
trtcm_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt,
                  bool should_steal)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];

        if (trtcm_policer_run_single_packet(policer, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            if (should_steal) {
                rte_pktmbuf_free(pkt);
            }
        }
    }
    return cnt;
}
static int
trtcm_policer_qos_queue_construct(const struct smap *details,
                                  uint32_t queue_id, struct qos_conf *conf)
{
    int err = 0;
    struct trtcm_policer_queue *queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    queue = trtcm_policer_qos_find_queue(policer, queue_id);
    if (!queue) {
        queue = xmalloc(sizeof *queue);
        queue->queue_id = queue_id;
        memset(&queue->stats, 0, sizeof queue->stats);
        queue->stats.created = time_msec();
        hmap_insert(&policer->queues, &queue->hmap_node,
                    hash_2words(queue_id, 0));
    }
    if (queue_id == 0 && smap_is_empty(details)) {
        /* No default queue configured, use port values */
        memcpy(&queue->meter_params, &policer->meter_params,
               sizeof queue->meter_params);
    } else {
        trtcm_policer_details_to_param(details, &queue->meter_params);
    }

    err = rte_meter_trtcm_rfc4115_profile_config(&queue->meter_profile,
                                                 &queue->meter_params);
    if (!err) {
        err = rte_meter_trtcm_rfc4115_config(&queue->meter,
                                             &queue->meter_profile);
    }
    if (err) {
        hmap_remove(&policer->queues, &queue->hmap_node);
        free(queue);
        err = -err;
    }
    return err;
}
static void
trtcm_policer_qos_queue_destruct(struct qos_conf *conf, uint32_t queue_id)
{
    struct trtcm_policer_queue *queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    queue = trtcm_policer_qos_find_queue(policer, queue_id);
    if (queue) {
        hmap_remove(&policer->queues, &queue->hmap_node);
        free(queue);
    }
}
static int
trtcm_policer_qos_queue_get(struct smap *details, uint32_t queue_id,
                            const struct qos_conf *conf)
{
    struct trtcm_policer_queue *queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    queue = trtcm_policer_qos_find_queue(policer, queue_id);
    if (!queue) {
        return EINVAL;
    }

    trtcm_policer_param_to_detail(&queue->meter_params, details);
    return 0;
}
static int
trtcm_policer_qos_queue_get_stats(const struct qos_conf *conf,
                                  uint32_t queue_id,
                                  struct netdev_queue_stats *stats)
{
    struct trtcm_policer_queue *queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    queue = trtcm_policer_qos_find_queue(policer, queue_id);
    if (!queue) {
        return EINVAL;
    }
    memcpy(stats, &queue->stats, sizeof *stats);
    return 0;
}
static int
trtcm_policer_qos_queue_dump_state_init(const struct qos_conf *conf,
                                        struct netdev_dpdk_queue_state *state)
{
    uint32_t i = 0;
    struct trtcm_policer_queue *queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    state->n_queues = hmap_count(&policer->queues);
    state->cur_queue = 0;
    state->queues = xmalloc(state->n_queues * sizeof *state->queues);

    HMAP_FOR_EACH (queue, hmap_node, &policer->queues) {
        state->queues[i++] = queue->queue_id;
    }
    return 0;
}
static const struct dpdk_qos_ops trtcm_policer_ops = {
    .qos_name = "trtcm-policer",
    .qos_construct = trtcm_policer_qos_construct,
    .qos_destruct = trtcm_policer_qos_destruct,
    .qos_get = trtcm_policer_qos_get,
    .qos_is_equal = trtcm_policer_qos_is_equal,
    .qos_run = trtcm_policer_run,
    .qos_queue_construct = trtcm_policer_qos_queue_construct,
    .qos_queue_destruct = trtcm_policer_qos_queue_destruct,
    .qos_queue_get = trtcm_policer_qos_queue_get,
    .qos_queue_get_stats = trtcm_policer_qos_queue_get_stats,
    .qos_queue_dump_state_init = trtcm_policer_qos_queue_dump_state_init
};
static int
netdev_dpdk_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dev->mutex);

    if (netdev->n_txq == dev->requested_n_txq
        && netdev->n_rxq == dev->requested_n_rxq
        && dev->mtu == dev->requested_mtu
        && dev->lsc_interrupt_mode == dev->requested_lsc_interrupt_mode
        && dev->rxq_size == dev->requested_rxq_size
        && dev->txq_size == dev->requested_txq_size
        && dev->socket_id == dev->requested_socket_id
        && dev->started && !dev->reset_needed) {
        /* Reconfiguration is unnecessary */
        goto out;
    }

    if (dev->reset_needed) {
        rte_eth_dev_reset(dev->port_id);
        if_notifier_manual_report();
        dev->reset_needed = false;
    } else {
        rte_eth_dev_stop(dev->port_id);
    }

    dev->started = false;

    err = netdev_dpdk_mempool_configure(dev);
    if (err && err != EEXIST) {
        goto out;
    }

    dev->lsc_interrupt_mode = dev->requested_lsc_interrupt_mode;

    netdev->n_txq = dev->requested_n_txq;
    netdev->n_rxq = dev->requested_n_rxq;

    dev->rxq_size = dev->requested_rxq_size;
    dev->txq_size = dev->requested_txq_size;

    rte_free(dev->tx_q);
    err = dpdk_eth_dev_init(dev);
    if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
        netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_TSO;
        netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM;
        netdev->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM;
    }

    dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq);
    if (!dev->tx_q) {
        err = ENOMEM;
    }

    netdev_change_seq_changed(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
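
/* A minimal sketch of the request/apply split used above (illustrative,
 * not code from this file): a settings handler only records the new value
 * and asks for a reconfiguration, which the datapath later applies by
 * calling netdev_dpdk_reconfigure() once the port is quiesced:
 *
 *     ovs_mutex_lock(&dev->mutex);
 *     dev->requested_rxq_size = new_rxq_size;
 *     netdev_request_reconfigure(&dev->up);
 *     ovs_mutex_unlock(&dev->mutex);
 */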
static int
dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    dev->up.n_txq = dev->requested_n_txq;
    dev->up.n_rxq = dev->requested_n_rxq;

    /* Always keep RX queue 0 enabled for implementations that won't
     * report vring states. */
    dev->vhost_rxq_enabled[0] = true;

    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    if (userspace_tso_enabled()) {
        dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD;
        VLOG_DBG("%s: TSO enabled on vhost port", netdev_get_name(&dev->up));
    }

    netdev_dpdk_remap_txqs(dev);

    err = netdev_dpdk_mempool_configure(dev);
    if (!err) {
        /* A new mempool was created or re-used. */
        netdev_change_seq_changed(&dev->up);
    } else if (err != EEXIST) {
        return err;
    }
    if (netdev_dpdk_get_vid(dev) >= 0) {
        if (dev->vhost_reconfigured == false) {
            dev->vhost_reconfigured = true;
            /* Carrier status may need updating. */
            netdev_change_seq_changed(&dev->up);
        }
    }

    return 0;
}
static int
netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;

    ovs_mutex_lock(&dev->mutex);
    err = dpdk_vhost_reconfigure_helper(dev);
    ovs_mutex_unlock(&dev->mutex);

    return err;
}
static int
netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;
    uint64_t vhost_flags = 0;
    bool zc_enabled;

    ovs_mutex_lock(&dev->mutex);

    /* Configure vHost client mode if requested and if the following criteria
     * are met:
     *  1. Device hasn't been registered yet.
     *  2. A path has been specified.
     */
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT) && dev->vhost_id) {
        /* Register client-mode device. */
        vhost_flags |= RTE_VHOST_USER_CLIENT;

        /* There is no support for multi-segments buffers. */
        vhost_flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;

        /* Enable IOMMU support, if explicitly requested. */
        if (dpdk_vhost_iommu_enabled()) {
            vhost_flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        /* Enable POSTCOPY support, if explicitly requested. */
        if (dpdk_vhost_postcopy_enabled()) {
            vhost_flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
        }

        zc_enabled = dev->vhost_driver_flags
                     & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        /* Enable zero copy flag, if requested */
        if (zc_enabled) {
            vhost_flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        }

        /* Enable External Buffers if TCP Segmentation Offload is enabled. */
        if (userspace_tso_enabled()) {
            vhost_flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
        }

        err = rte_vhost_driver_register(dev->vhost_id, vhost_flags);
        if (err) {
            VLOG_ERR("vhost-user device setup failure for device %s\n",
                     dev->vhost_id);
            goto unlock;
        } else {
            /* Configuration successful */
            dev->vhost_driver_flags |= vhost_flags;
            VLOG_INFO("vHost User device '%s' created in 'client' mode, "
                      "using client socket '%s'",
                      dev->up.name, dev->vhost_id);
            if (zc_enabled) {
                VLOG_INFO("Zero copy enabled for vHost port %s",
                          dev->up.name);
            }
        }

        err = rte_vhost_driver_callback_register(dev->vhost_id,
                                                 &virtio_net_device_ops);
        if (err) {
            VLOG_ERR("rte_vhost_driver_callback_register failed for "
                     "vhost user client port: %s\n", dev->up.name);
            goto unlock;
        }

        if (userspace_tso_enabled()) {
            netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_TSO;
            netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM;
            netdev->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM;
        } else {
            err = rte_vhost_driver_disable_features(dev->vhost_id,
                                        1ULL << VIRTIO_NET_F_HOST_TSO4
                                        | 1ULL << VIRTIO_NET_F_HOST_TSO6
                                        | 1ULL << VIRTIO_NET_F_CSUM);
            if (err) {
                VLOG_ERR("rte_vhost_driver_disable_features failed for "
                         "vhost user client port: %s\n", dev->up.name);
                goto unlock;
            }
        }

        err = rte_vhost_driver_start(dev->vhost_id);
        if (err) {
            VLOG_ERR("rte_vhost_driver_start failed for vhost user "
                     "client port: %s\n", dev->up.name);
            goto unlock;
        }
    }

    err = dpdk_vhost_reconfigure_helper(dev);

unlock:
    ovs_mutex_unlock(&dev->mutex);

    return err;
}
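
/* A dpdkvhostuserclient port supplies its socket path through the database
 * (example adapted from the OVS vhost-user documentation; the bridge, port
 * name, and path are illustrative):
 *
 *     $ ovs-vsctl add-port br0 dpdkvhostclient0 -- \
 *         set Interface dpdkvhostclient0 type=dpdkvhostuserclient \
 *         options:vhost-server-path=/tmp/dpdkvhostclient0
 */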
int
netdev_dpdk_get_port_id(struct netdev *netdev)
{
    struct netdev_dpdk *dev;
    int ret = -1;

    if (!is_dpdk_class(netdev->netdev_class)) {
        goto out;
    }

    dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    ret = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);
out:
    return ret;
}
bool
netdev_dpdk_flow_api_supported(struct netdev *netdev)
{
    struct netdev_dpdk *dev;
    bool ret = false;

    if (!is_dpdk_class(netdev->netdev_class)) {
        goto out;
    }

    dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    if (dev->type == DPDK_DEV_ETH) {
        /* TODO: Check if we are able to offload some minimal flow. */
        ret = true;
    }
    ovs_mutex_unlock(&dev->mutex);
out:
    return ret;
}
int
netdev_dpdk_rte_flow_destroy(struct netdev *netdev,
                             struct rte_flow *rte_flow,
                             struct rte_flow_error *error)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ret;

    ovs_mutex_lock(&dev->mutex);
    ret = rte_flow_destroy(dev->port_id, rte_flow, error);
    ovs_mutex_unlock(&dev->mutex);
    return ret;
}
struct rte_flow *
netdev_dpdk_rte_flow_create(struct netdev *netdev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item *items,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error)
{
    struct rte_flow *flow;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    flow = rte_flow_create(dev->port_id, attr, items, actions, error);
    ovs_mutex_unlock(&dev->mutex);
    return flow;
}
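
/* These thin wrappers let the flow offload layer drive rte_flow without
 * touching DPDK port ids directly.  A caller-side sketch (illustrative
 * only; in OVS the real caller is the netdev-offload-dpdk layer):
 *
 *     struct rte_flow_error error;
 *     struct rte_flow *flow;
 *
 *     flow = netdev_dpdk_rte_flow_create(netdev, &attr, patterns,
 *                                        actions, &error);
 *     if (!flow) {
 *         VLOG_ERR("rte_flow creation failed: %s",
 *                  error.message ? error.message : "(no stated reason)");
 *     }
 */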
int
netdev_dpdk_rte_flow_query_count(struct netdev *netdev,
                                 struct rte_flow *rte_flow,
                                 struct rte_flow_query_count *query,
                                 struct rte_flow_error *error)
{
    struct rte_flow_action_count count = { .shared = 0, .id = 0 };
    const struct rte_flow_action actions[] = {
        {
            .type = RTE_FLOW_ACTION_TYPE_COUNT,
            .conf = &count,
        },
        {
            .type = RTE_FLOW_ACTION_TYPE_END,
        },
    };
    struct netdev_dpdk *dev;
    int ret;

    if (!is_dpdk_class(netdev->netdev_class)) {
        return -1;
    }

    dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    ret = rte_flow_query(dev->port_id, rte_flow, actions, query, error);
    ovs_mutex_unlock(&dev->mutex);
    return ret;
}
#define NETDEV_DPDK_CLASS_COMMON                            \
    .is_pmd = true,                                         \
    .alloc = netdev_dpdk_alloc,                             \
    .dealloc = netdev_dpdk_dealloc,                         \
    .get_config = netdev_dpdk_get_config,                   \
    .get_numa_id = netdev_dpdk_get_numa_id,                 \
    .set_etheraddr = netdev_dpdk_set_etheraddr,             \
    .get_etheraddr = netdev_dpdk_get_etheraddr,             \
    .get_mtu = netdev_dpdk_get_mtu,                         \
    .set_mtu = netdev_dpdk_set_mtu,                         \
    .get_ifindex = netdev_dpdk_get_ifindex,                 \
    .get_carrier_resets = netdev_dpdk_get_carrier_resets,   \
    .set_miimon_interval = netdev_dpdk_set_miimon,          \
    .set_policing = netdev_dpdk_set_policing,               \
    .get_qos_types = netdev_dpdk_get_qos_types,             \
    .get_qos = netdev_dpdk_get_qos,                         \
    .set_qos = netdev_dpdk_set_qos,                         \
    .get_queue = netdev_dpdk_get_queue,                     \
    .set_queue = netdev_dpdk_set_queue,                     \
    .delete_queue = netdev_dpdk_delete_queue,               \
    .get_queue_stats = netdev_dpdk_get_queue_stats,         \
    .queue_dump_start = netdev_dpdk_queue_dump_start,       \
    .queue_dump_next = netdev_dpdk_queue_dump_next,         \
    .queue_dump_done = netdev_dpdk_queue_dump_done,         \
    .update_flags = netdev_dpdk_update_flags,               \
    .rxq_alloc = netdev_dpdk_rxq_alloc,                     \
    .rxq_construct = netdev_dpdk_rxq_construct,             \
    .rxq_destruct = netdev_dpdk_rxq_destruct,               \
    .rxq_dealloc = netdev_dpdk_rxq_dealloc

#define NETDEV_DPDK_CLASS_BASE                          \
    NETDEV_DPDK_CLASS_COMMON,                           \
    .init = netdev_dpdk_class_init,                     \
    .destruct = netdev_dpdk_destruct,                   \
    .set_tx_multiq = netdev_dpdk_set_tx_multiq,         \
    .get_carrier = netdev_dpdk_get_carrier,             \
    .get_stats = netdev_dpdk_get_stats,                 \
    .get_custom_stats = netdev_dpdk_get_custom_stats,   \
    .get_features = netdev_dpdk_get_features,           \
    .get_status = netdev_dpdk_get_status,               \
    .reconfigure = netdev_dpdk_reconfigure,             \
    .rxq_recv = netdev_dpdk_rxq_recv
static const struct netdev_class dpdk_class = {
    .type = "dpdk",
    NETDEV_DPDK_CLASS_BASE,
    .construct = netdev_dpdk_construct,
    .set_config = netdev_dpdk_set_config,
    .send = netdev_dpdk_eth_send,
};

static const struct netdev_class dpdk_ring_class = {
    .type = "dpdkr",
    NETDEV_DPDK_CLASS_BASE,
    .construct = netdev_dpdk_ring_construct,
    .set_config = netdev_dpdk_ring_set_config,
    .send = netdev_dpdk_ring_send,
};

static const struct netdev_class dpdk_vhost_class = {
    .type = "dpdkvhostuser",
    NETDEV_DPDK_CLASS_COMMON,
    .construct = netdev_dpdk_vhost_construct,
    .destruct = netdev_dpdk_vhost_destruct,
    .send = netdev_dpdk_vhost_send,
    .get_carrier = netdev_dpdk_vhost_get_carrier,
    .get_stats = netdev_dpdk_vhost_get_stats,
    .get_custom_stats = netdev_dpdk_get_sw_custom_stats,
    .get_status = netdev_dpdk_vhost_user_get_status,
    .reconfigure = netdev_dpdk_vhost_reconfigure,
    .rxq_recv = netdev_dpdk_vhost_rxq_recv,
    .rxq_enabled = netdev_dpdk_vhost_rxq_enabled,
};

static const struct netdev_class dpdk_vhost_client_class = {
    .type = "dpdkvhostuserclient",
    NETDEV_DPDK_CLASS_COMMON,
    .construct = netdev_dpdk_vhost_client_construct,
    .destruct = netdev_dpdk_vhost_destruct,
    .set_config = netdev_dpdk_vhost_client_set_config,
    .send = netdev_dpdk_vhost_send,
    .get_carrier = netdev_dpdk_vhost_get_carrier,
    .get_stats = netdev_dpdk_vhost_get_stats,
    .get_custom_stats = netdev_dpdk_get_sw_custom_stats,
    .get_status = netdev_dpdk_vhost_user_get_status,
    .reconfigure = netdev_dpdk_vhost_client_reconfigure,
    .rxq_recv = netdev_dpdk_vhost_rxq_recv,
    .rxq_enabled = netdev_dpdk_vhost_rxq_enabled,
};

void
netdev_dpdk_register(void)
{
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_ring_class);
    netdev_register_provider(&dpdk_vhost_class);
    netdev_register_provider(&dpdk_vhost_client_class);
}