/*
 * Copyright (c) 2014, 2015, 2016, 2017 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>

#include "netdev-dpdk.h"

#include <linux/virtio_net.h>
#include <sys/socket.h>

#include <rte_bus_pci.h>
#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_meter.h>
#include <rte_version.h>
#include <rte_vhost.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "if-notifier.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/match.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/shash.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "unaligned.h"
#include "userspace-tso.h"

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

COVERAGE_DEFINE(vhost_tx_contention);
COVERAGE_DEFINE(vhost_notification);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"

/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + RTE_ETHER_HDR_LEN \
                                     + RTE_ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len)                    \
                                     - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN)
#define NETDEV_DPDK_MBUF_ALIGN      1024
#define NETDEV_DPDK_MAX_PKT_LEN     9728
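
/* Worked example with a standard 1500-byte MTU (illustrative; uses the DPDK
 * constants RTE_ETHER_HDR_LEN = 14 and RTE_ETHER_CRC_LEN = 4 and OVS'
 * VLAN_HEADER_LEN = 4):
 *   MTU_TO_FRAME_LEN(1500)     = 1500 + 14 + 4           = 1518
 *   MTU_TO_MAX_FRAME_LEN(1500) = 1500 + 14 + 4 + (2 * 4) = 1526
 *   FRAME_LEN_TO_MTU(1518)     = 1518 - 14 - 4           = 1500
 * NETDEV_DPDK_MAX_PKT_LEN thus corresponds to a maximum configurable MTU of
 * FRAME_LEN_TO_MTU(9728) = 9710. */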

/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
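
/* Illustrative consequence of the two assertions above: starting from
 * MAX_NB_MBUF = 262144, the allocator may try 262144, 131072, 65536, 32768
 * and finally MIN_NB_MBUF = 16384 mbufs before giving up, and each value in
 * that sequence remains a multiple of MP_CACHE_SZ. */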

/* Default size of Physical NIC RXQ */
#define NIC_PORT_DEFAULT_RXQ_SIZE 2048
/* Default size of Physical NIC TXQ */
#define NIC_PORT_DEFAULT_TXQ_SIZE 2048
/* Maximum size of Physical NIC Queues */
#define NIC_PORT_MAX_Q_SIZE 4096

#define OVS_VHOST_MAX_QUEUE_NUM 1024    /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */

#define DPDK_ETH_PORT_ID_INVALID    RTE_MAX_ETHPORTS

/* DPDK library uses uint16_t for port_id. */
typedef uint16_t dpdk_port_t;
#define DPDK_PORT_ID_FMT "%"PRIu16

/* Minimum amount of vhost tx retries, effectively a disable. */
#define VHOST_ENQ_RETRY_MIN 0
/* Maximum amount of vhost tx retries. */
#define VHOST_ENQ_RETRY_MAX 32
/* Legacy default value for vhost tx retries. */
#define VHOST_ENQ_RETRY_DEF 8

#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)

/* List of required flags advertised by the hardware that will be used
 * if TSO is enabled. Ideally this should include DEV_TX_OFFLOAD_SCTP_CKSUM.
 * However, very few drivers support that at the moment, and SCTP is not as
 * widely used a protocol as TCP and UDP, so it's optional. */
#define DPDK_TX_TSO_OFFLOAD_FLAGS (DEV_TX_OFFLOAD_TCP_TSO        \
                                   | DEV_TX_OFFLOAD_TCP_CKSUM    \
                                   | DEV_TX_OFFLOAD_UDP_CKSUM    \
                                   | DEV_TX_OFFLOAD_IPV4_CKSUM)

static const struct rte_eth_conf port_conf = {
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static int new_device(int vid);
static void destroy_device(int vid);
static int vring_state_changed(int vid, uint16_t queue_id, int enable);
static void destroy_connection(int vid);
static void vhost_guest_notified(int vid);

static const struct vhost_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed,
    .features_changed = NULL,
    .new_connection = NULL,
    .destroy_connection = destroy_connection,
    .guest_notified = vhost_guest_notified,
};

/* Custom software stats for dpdk ports */
struct netdev_dpdk_sw_stats {
    /* No. of retries when unable to transmit. */
    uint64_t tx_retries;
    /* Packet drops when unable to transmit; Probably Tx queue is full. */
    uint64_t tx_failure_drops;
    /* Packet length greater than device MTU. */
    uint64_t tx_mtu_exceeded_drops;
    /* Packet drops in egress policer processing. */
    uint64_t tx_qos_drops;
    /* Packet drops in ingress policer processing. */
    uint64_t rx_qos_drops;
    /* Packet drops in HWOL processing. */
    uint64_t tx_invalid_hwol_drops;
};

/* Quality of Service */

/* An instance of a QoS configuration. Always associated with a particular
 * netdev.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs.
 */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
    rte_spinlock_t lock;
};

/* QoS queue information used by the netdev queue dump functions. */
struct netdev_dpdk_queue_state {
    uint32_t *queues;
    int cur_queue;
    int n_queues;
};

/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct a qos_conf object. The implementation should make
     * the appropriate calls to configure QoS according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets '*conf' to an
     * initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_construct)(const struct smap *details, struct qos_conf **conf);

    /* Destroys the data structures allocated by the implementation as part of
     * 'conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    void (*qos_destruct)(struct qos_conf *conf);

    /* Retrieves details of 'conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     */
    int (*qos_get)(const struct qos_conf *conf, struct smap *details);

    /* Returns true if 'conf' is already configured according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * For all QoS implementations it should always be non-null.
     */
    bool (*qos_is_equal)(const struct qos_conf *conf,
                         const struct smap *details);

    /* Modify an array of rte_mbufs. The modification is specific to
     * each qos implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf **pkts,
                   int pkt_cnt, bool should_steal);

    /* Called to construct a QoS Queue. The implementation should make
     * the appropriate calls to configure QoS Queue according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it constructs
     * QoS queue successfully.
     */
    int (*qos_queue_construct)(const struct smap *details,
                               uint32_t queue_id, struct qos_conf *conf);

    /* Destroys the QoS Queue. */
    void (*qos_queue_destruct)(struct qos_conf *conf, uint32_t queue_id);

    /* Retrieves details of QoS Queue configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     */
    int (*qos_queue_get)(struct smap *details, uint32_t queue_id,
                         const struct qos_conf *conf);

    /* Retrieves statistics of QoS Queue configuration into 'stats'. */
    int (*qos_queue_get_stats)(const struct qos_conf *conf, uint32_t queue_id,
                               struct netdev_queue_stats *stats);

    /* Setup the 'netdev_dpdk_queue_state' structure used by the dpdk queue
     * dump functions. */
    int (*qos_queue_dump_state_init)(const struct qos_conf *conf,
                                     struct netdev_dpdk_queue_state *state);
};

/* dpdk_qos_ops for each type of user space QoS implementation. */
static const struct dpdk_qos_ops egress_policer_ops;
static const struct dpdk_qos_ops trtcm_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    &trtcm_policer_ops,
    NULL
};

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
    = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_mp's. */
static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mp_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
};

/* There should be one 'struct dpdk_tx_queue' created for
 * each netdev tx queue. */
struct dpdk_tx_queue {
    /* Padding to make dpdk_tx_queue exactly one cache line long. */
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* Protects the members and the NIC queue from concurrent access.
         * It is used only if the queue is shared among different pmd threads
         * (see 'concurrent_txq'). */
        rte_spinlock_t tx_lock;
        /* Mapping of configured vhost-user queue to enabled by guest. */
        int map;
    );
};

struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    struct rte_meter_srtcm_profile in_prof;
    rte_spinlock_t policer_lock;
};

enum dpdk_hw_ol_features {
    NETDEV_RX_CHECKSUM_OFFLOAD = 1 << 0,
    NETDEV_RX_HW_CRC_STRIP = 1 << 1,
    NETDEV_RX_HW_SCATTER = 1 << 2,
    NETDEV_TX_TSO_OFFLOAD = 1 << 3,
    NETDEV_TX_SCTP_CHECKSUM_OFFLOAD = 1 << 4,
};

/*
 * In order to avoid confusion in variables names, following naming convention
 * should be used, if possible:
 *
 *     'struct netdev'          : 'netdev'
 *     'struct netdev_dpdk'     : 'dev'
 *     'struct netdev_rxq'      : 'rxq'
 *     'struct netdev_rxq_dpdk' : 'rx'
 *
 * Example:
 *     struct netdev *netdev = netdev_from_name(name);
 *     struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
 *
 * Also, 'netdev' should be used instead of 'dev->up', where 'netdev' was
 * already defined.
 */

struct netdev_dpdk {
    PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline0,
        dpdk_port_t port_id;

        /* If true, device was attached by rte_eth_dev_attach(). */
        bool attached;
        /* If true, rte_eth_dev_start() was successfully called */
        bool started;
        bool reset_needed;
        /* 1 pad byte here. */
        struct eth_addr hwaddr;
        int mtu;
        int socket_id;
        int buf_size;
        int max_packet_len;
        enum dpdk_dev_type type;
        enum netdev_flags flags;

        /* Device arguments for dpdk ports. */
        char *devargs;

        /* Identifier used to distinguish vhost devices from each other. */
        char *vhost_id;

        struct dpdk_tx_queue *tx_q;
        struct rte_eth_link link;
        int link_reset_cnt;
    );

    PADDED_MEMBERS_CACHELINE_MARKER(CACHE_LINE_SIZE, cacheline1,
        struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);
        struct dpdk_mp *dpdk_mp;

        /* virtio identifier for vhost devices */
        ovsrcu_index vid;

        /* True if vHost device is 'up' and has been reconfigured at least
         * once */
        bool vhost_reconfigured;

        atomic_uint8_t vhost_tx_retries_max;
        /* 2 pad bytes here. */
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct netdev up;
        /* In dpdk_list. */
        struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

        /* QoS configuration and lock for the device */
        OVSRCU_TYPE(struct qos_conf *) qos_conf;

        /* Ingress Policer */
        OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
        uint32_t policer_rate;
        uint32_t policer_burst;

        /* Array of vhost rxq states, see vring_state_changed. */
        bool *vhost_rxq_enabled;
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        struct netdev_stats stats;
        struct netdev_dpdk_sw_stats *sw_stats;
        /* Protects stats */
        rte_spinlock_t stats_lock;
        /* 36 pad bytes here. */
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* The following properties cannot be changed when a device is running,
         * so we remember the request and update them next time
         * netdev_dpdk*_reconfigure() is called */
        int requested_mtu;
        int requested_n_txq;
        int requested_n_rxq;
        int requested_rxq_size;
        int requested_txq_size;

        /* Number of rx/tx descriptors for physical devices */
        int rxq_size;
        int txq_size;

        /* Socket ID detected when vHost device is brought up */
        int requested_socket_id;

        /* Denotes whether vHost port is client/server mode */
        uint64_t vhost_driver_flags;

        /* DPDK-ETH Flow control */
        struct rte_eth_fc_conf fc_conf;

        /* DPDK-ETH hardware offload features,
         * from the enum set 'dpdk_hw_ol_features' */
        uint32_t hw_ol_features;

        /* Properties for link state change detection mode.
         * If lsc_interrupt_mode is set to false, poll mode is used,
         * otherwise interrupt mode is used. */
        bool requested_lsc_interrupt_mode;
        bool lsc_interrupt_mode;

        /* VF configuration. */
        struct eth_addr requested_hwaddr;
    );

    PADDED_MEMBERS(CACHE_LINE_SIZE,
        /* Names of all XSTATS counters */
        struct rte_eth_xstat_name *rte_xstats_names;
        int rte_xstats_names_size;
        int rte_xstats_ids_size;
        uint64_t *rte_xstats_ids;
    );
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    dpdk_port_t port_id;
};

static void netdev_dpdk_destruct(struct netdev *netdev);
static void netdev_dpdk_vhost_destruct(struct netdev *netdev);

static int netdev_dpdk_get_sw_custom_stats(const struct netdev *,
                                           struct netdev_custom_stats *);
static void netdev_dpdk_clear_xstats(struct netdev_dpdk *dev);

int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->destruct == netdev_dpdk_destruct
           || class->destruct == netdev_dpdk_vhost_destruct;
}

/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP(MTU_TO_MAX_FRAME_LEN(mtu), NETDEV_DPDK_MBUF_ALIGN)
           + RTE_PKTMBUF_HEADROOM;
}
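
/* Worked example for dpdk_buf_size() (illustrative): for mtu = 1500,
 * MTU_TO_MAX_FRAME_LEN(1500) = 1526, which ROUND_UP() takes to 2048 with
 * NETDEV_DPDK_MBUF_ALIGN = 1024; assuming DPDK's default
 * RTE_PKTMBUF_HEADROOM of 128 bytes, the returned buffer size is
 * 2048 + 128 = 2176. */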

/* Allocates an area of 'sz' bytes from DPDK. The memory is zero'ed.
 *
 * Unlike xmalloc(), this function can return NULL on failure. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    return rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
}

void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}

static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp OVS_UNUSED,
                     void *opaque_arg OVS_UNUSED,
                     void *_p,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *pkt = _p;

    dp_packet_init_dpdk((struct dp_packet *) pkt);
}

static int
dpdk_mp_full(const struct rte_mempool *mp) OVS_REQUIRES(dpdk_mp_mutex)
{
    /* At this point we want to know if all the mbufs are back
     * in the mempool. rte_mempool_full() is not atomic but it's
     * the best available and as we are no longer requesting mbufs
     * from the mempool, it means mbufs will not move from
     * 'mempool ring' --> 'mempool cache'. In rte_mempool_full()
     * the ring is counted before caches, so we won't get false
     * positives in this use case and we handle false negatives.
     *
     * If future implementations of rte_mempool_full() were to change
     * it could be possible for a false positive. Even that would
     * likely be ok, as there are additional checks during mempool
     * freeing but it would make things racey. */
    return rte_mempool_full(mp);
}

/* Free unused mempools. */
static void
dpdk_mp_sweep(void) OVS_REQUIRES(dpdk_mp_mutex)
{
    struct dpdk_mp *dmp, *next;

    LIST_FOR_EACH_SAFE (dmp, next, list_node, &dpdk_mp_list) {
        if (!dmp->refcount && dpdk_mp_full(dmp->mp)) {
            VLOG_DBG("Freeing mempool \"%s\"", dmp->mp->name);
            ovs_list_remove(&dmp->list_node);
            rte_mempool_free(dmp->mp);
            rte_free(dmp);
        }
    }
}

/* Calculating the required number of mbufs differs depending on the
 * mempool model being used. Check if per port memory is in use before
 * calculating. */
static uint32_t
dpdk_calculate_mbufs(struct netdev_dpdk *dev, int mtu, bool per_port_mp)
{
    uint32_t n_mbufs;

    if (!per_port_mp) {
        /* Shared memory is being used.
         * XXX: this is a really rough method of provisioning memory.
         * It's impossible to determine what the exact memory requirements are
         * when the number of ports and rxqs that utilize a particular mempool
         * can change dynamically at runtime. For now, use this rough
         * heuristic.
         */
        if (mtu >= RTE_ETHER_MTU) {
            n_mbufs = MAX_NB_MBUF;
        } else {
            n_mbufs = MIN_NB_MBUF;
        }
    } else {
        /* Per port memory is being used.
         * XXX: rough estimation of number of mbufs required for this port:
         * <packets required to fill the device rxqs>
         * + <packets that could be stuck on other ports txqs>
         * + <packets in the pmd threads>
         * + <additional memory for corner cases>
         */
        n_mbufs = dev->requested_n_rxq * dev->requested_rxq_size
                  + dev->requested_n_txq * dev->requested_txq_size
                  + MIN(RTE_MAX_LCORE, dev->requested_n_rxq) * NETDEV_MAX_BURST
                  + MIN_NB_MBUF;
    }

    return n_mbufs;
}
*
676 dpdk_mp_create(struct netdev_dpdk
*dev
, int mtu
, bool per_port_mp
)
678 char mp_name
[RTE_MEMPOOL_NAMESIZE
];
679 const char *netdev_name
= netdev_get_name(&dev
->up
);
680 int socket_id
= dev
->requested_socket_id
;
681 uint32_t n_mbufs
= 0;
682 uint32_t mbuf_size
= 0;
683 uint32_t aligned_mbuf_size
= 0;
684 uint32_t mbuf_priv_data_len
= 0;
685 uint32_t pkt_size
= 0;
686 uint32_t hash
= hash_string(netdev_name
, 0);
687 struct dpdk_mp
*dmp
= NULL
;
690 dmp
= dpdk_rte_mzalloc(sizeof *dmp
);
694 dmp
->socket_id
= socket_id
;
698 /* Get the size of each mbuf, based on the MTU */
699 mbuf_size
= MTU_TO_FRAME_LEN(mtu
);
701 n_mbufs
= dpdk_calculate_mbufs(dev
, mtu
, per_port_mp
);
704 /* Full DPDK memory pool name must be unique and cannot be
705 * longer than RTE_MEMPOOL_NAMESIZE. Note that for the shared
706 * mempool case this can result in one device using a mempool
707 * which references a different device in it's name. However as
708 * mempool names are hashed, the device name will not be readable
709 * so this is not an issue for tasks such as debugging.
711 ret
= snprintf(mp_name
, RTE_MEMPOOL_NAMESIZE
,
712 "ovs%08x%02d%05d%07u",
713 hash
, socket_id
, mtu
, n_mbufs
);
714 if (ret
< 0 || ret
>= RTE_MEMPOOL_NAMESIZE
) {
715 VLOG_DBG("snprintf returned %d. "
716 "Failed to generate a mempool name for \"%s\". "
717 "Hash:0x%x, socket_id: %d, mtu:%d, mbufs:%u.",
718 ret
, netdev_name
, hash
, socket_id
, mtu
, n_mbufs
);
722 VLOG_DBG("Port %s: Requesting a mempool of %u mbufs of size %u "
723 "on socket %d for %d Rx and %d Tx queues, "
724 "cache line size of %u",
725 netdev_name
, n_mbufs
, mbuf_size
, socket_id
,
726 dev
->requested_n_rxq
, dev
->requested_n_txq
,
727 RTE_CACHE_LINE_SIZE
);
729 /* The size of the mbuf's private area (i.e. area that holds OvS'
731 mbuf_priv_data_len
= sizeof(struct dp_packet
) -
732 sizeof(struct rte_mbuf
);
733 /* The size of the entire dp_packet. */
734 pkt_size
= sizeof(struct dp_packet
) + mbuf_size
;
735 /* mbuf size, rounded up to cacheline size. */
736 aligned_mbuf_size
= ROUND_UP(pkt_size
, RTE_CACHE_LINE_SIZE
);
737 /* If there is a size discrepancy, add padding to mbuf_priv_data_len.
738 * This maintains mbuf size cache alignment, while also honoring RX
739 * buffer alignment in the data portion of the mbuf. If this adjustment
740 * is not made, there is a possiblity later on that for an element of
741 * the mempool, buf, buf->data_len < (buf->buf_len - buf->data_off).
742 * This is problematic in the case of multi-segment mbufs, particularly
743 * when an mbuf segment needs to be resized (when [push|popp]ing a VLAN
744 * header, for example.
746 mbuf_priv_data_len
+= (aligned_mbuf_size
- pkt_size
);
748 dmp
->mp
= rte_pktmbuf_pool_create(mp_name
, n_mbufs
, MP_CACHE_SZ
,
754 VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
756 /* rte_pktmbuf_pool_create has done some initialization of the
757 * rte_mbuf part of each dp_packet, while ovs_rte_pktmbuf_init
758 * initializes some OVS specific fields of dp_packet.
760 rte_mempool_obj_iter(dmp
->mp
, ovs_rte_pktmbuf_init
, NULL
);
762 } else if (rte_errno
== EEXIST
) {
763 /* A mempool with the same name already exists. We just
764 * retrieve its pointer to be returned to the caller. */
765 dmp
->mp
= rte_mempool_lookup(mp_name
);
766 /* As the mempool create returned EEXIST we can expect the
767 * lookup has returned a valid pointer. If for some reason
768 * that's not the case we keep track of it. */
769 VLOG_DBG("A mempool with name \"%s\" already exists at %p.",
773 VLOG_DBG("Failed to create mempool \"%s\" with a request of "
774 "%u mbufs, retrying with %u mbufs",
775 mp_name
, n_mbufs
, n_mbufs
/ 2);
777 } while (!dmp
->mp
&& rte_errno
== ENOMEM
&& (n_mbufs
/= 2) >= MIN_NB_MBUF
);
779 VLOG_ERR("Failed to create mempool \"%s\" with a request of %u mbufs",
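
/* Illustrative mempool name produced by the "ovs%08x%02d%05d%07u" format
 * above: hash 0x1badf00d, socket 0, MTU 1500 and 262144 mbufs yield
 * "ovs1badf00d00015000262144" (25 characters, which fits within
 * RTE_MEMPOOL_NAMESIZE). */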

static struct dpdk_mp *
dpdk_mp_get(struct netdev_dpdk *dev, int mtu, bool per_port_mp)
{
    struct dpdk_mp *dmp, *next;
    bool reuse = false;

    ovs_mutex_lock(&dpdk_mp_mutex);
    /* Check if shared memory is being used, if so check existing mempools
     * to see if reuse is possible. */
    if (!per_port_mp) {
        LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
            if (dmp->socket_id == dev->requested_socket_id
                && dmp->mtu == mtu) {
                VLOG_DBG("Reusing mempool \"%s\"", dmp->mp->name);
                dmp->refcount++;
                reuse = true;
                break;
            }
        }
    }
    /* Sweep mempools after reuse or before create. */
    dpdk_mp_sweep();

    if (!reuse) {
        dmp = dpdk_mp_create(dev, mtu, per_port_mp);
        if (dmp) {
            /* Shared memory will hit the reuse case above so will not
             * request a mempool that already exists but we need to check
             * for the EEXIST case for per port memory case. Compare the
             * mempool returned by dmp to each entry in dpdk_mp_list. If a
             * match is found, free dmp as a new entry is not required, set
             * dmp to point to the existing entry and increment the refcount
             * to avoid being freed at a later stage.
             */
            if (per_port_mp && rte_errno == EEXIST) {
                LIST_FOR_EACH (next, list_node, &dpdk_mp_list) {
                    if (dmp->mp == next->mp) {
                        rte_free(dmp);
                        dmp = next;
                        dmp->refcount++;
                    }
                }
            } else {
                ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
            }
        }
    }

    ovs_mutex_unlock(&dpdk_mp_mutex);

    return dmp;
}

/* Decrement reference to a mempool. */
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    ovs_mutex_lock(&dpdk_mp_mutex);
    ovs_assert(dmp->refcount);
    dmp->refcount--;
    ovs_mutex_unlock(&dpdk_mp_mutex);
}
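
/* Sketch of the reference-counting lifecycle implied by the helpers above:
 * dpdk_mp_get() either bumps the refcount of a reused mempool or returns a
 * newly created one, dpdk_mp_put() drops that reference, and dpdk_mp_sweep()
 * finally frees a mempool once its refcount is zero and all mbufs have
 * returned to the pool. */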

/* Depending on the memory model being used this function tries to
 * identify and reuse an existing mempool or tries to allocate a new
 * mempool on requested_socket_id with mbuf size corresponding to the
 * requested_mtu. On success, a new configuration will be applied.
 * On error, device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
    struct dpdk_mp *dmp;
    int ret = 0;
    bool per_port_mp = dpdk_per_port_memory();

    /* With shared memory we do not need to configure a mempool if the MTU
     * and socket ID have not changed, the previous configuration is still
     * valid so return 0 */
    if (!per_port_mp && dev->mtu == dev->requested_mtu
        && dev->socket_id == dev->requested_socket_id) {
        return ret;
    }

    dmp = dpdk_mp_get(dev, FRAME_LEN_TO_MTU(buf_size), per_port_mp);
    if (!dmp) {
        VLOG_ERR("Failed to create memory pool for netdev "
                 "%s, with MTU %d on socket %d: %s\n",
                 dev->up.name, dev->requested_mtu, dev->requested_socket_id,
                 rte_strerror(rte_errno));
        return rte_errno;
    } else {
        /* Check for any pre-existing dpdk_mp for the device before accessing
         * the associated mempool.
         */
        if (dev->dpdk_mp != NULL) {
            /* A new MTU was requested, decrement the reference count for the
             * devices current dpdk_mp. This is required even if a pointer to
             * same dpdk_mp is returned by dpdk_mp_get. The refcount for dmp
             * has already been incremented by dpdk_mp_get at this stage so it
             * must be decremented to keep an accurate refcount for the
             * dpdk_mp.
             */
            dpdk_mp_put(dev->dpdk_mp);
        }
        dev->dpdk_mp = dmp;
        dev->mtu = dev->requested_mtu;
        dev->socket_id = dev->requested_socket_id;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    }

    return ret;
}

static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl,
                        "Port "DPDK_PORT_ID_FMT" Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX)
                        ? "full-duplex" : "half-duplex");
        } else {
            VLOG_DBG_RL(&rl, "Port "DPDK_PORT_ID_FMT" Link Down",
                        dev->port_id);
        }
    }
}

static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}

static int
dpdk_eth_dev_port_config(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;
    struct rte_eth_conf conf = port_conf;
    struct rte_eth_dev_info info;
    uint16_t conf_mtu;

    rte_eth_dev_info_get(dev->port_id, &info);

    /* As of DPDK 19.11, it is not allowed to set a mq_mode for
     * virtio PMD driver. */
    if (!strcmp(info.driver_name, "net_virtio")) {
        conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
    } else {
        conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
    }

    /* As of DPDK 17.11.1 a few PMDs require to explicitly enable
     * scatter to support jumbo RX.
     * Setting scatter for the device is done after checking for
     * scatter support in the device capabilities. */
    if (dev->mtu > RTE_ETHER_MTU) {
        if (dev->hw_ol_features & NETDEV_RX_HW_SCATTER) {
            conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
        }
    }

    conf.intr_conf.lsc = dev->lsc_interrupt_mode;

    if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) {
        conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
    }

    if (!(dev->hw_ol_features & NETDEV_RX_HW_CRC_STRIP)
        && info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC) {
        conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
    }

    if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
        conf.txmode.offloads |= DPDK_TX_TSO_OFFLOAD_FLAGS;
        if (dev->hw_ol_features & NETDEV_TX_SCTP_CHECKSUM_OFFLOAD) {
            conf.txmode.offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
        }
    }

    /* Limit configured rss hash functions to only those supported
     * by the eth device. */
    conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request less queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
        if (diag) {
            VLOG_WARN("Interface %s eth_dev setup error %s\n",
                      dev->up.name, rte_strerror(-diag));
            break;
        }

        diag = rte_eth_dev_set_mtu(dev->port_id, dev->mtu);
        if (diag) {
            /* A device may not support rte_eth_dev_set_mtu, in this case
             * flag a warning to the user and include the devices configured
             * MTU value that will be used instead. */
            if (-ENOTSUP == diag) {
                rte_eth_dev_get_mtu(dev->port_id, &conf_mtu);
                VLOG_WARN("Interface %s does not support MTU configuration, "
                          "max packet size supported is %"PRIu16".",
                          dev->up.name, conf_mtu);
            } else {
                VLOG_ERR("Interface %s MTU (%d) setup error: %s",
                         dev->up.name, dev->mtu, rte_strerror(-diag));
                break;
            }
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, dev->txq_size,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s unable to setup txq(%d): %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with less tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, dev->rxq_size,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s unable to setup rxq(%d): %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with less rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag;
}
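
/* Illustrative run of the retry loop above: if a NIC advertises 16 queues
 * but only 12 are usable, a request for (rxq:16 txq:16) fails at tx queue
 * setup 12, the loop retries with (rxq:16 txq:12), and if rx queue setup
 * also stops at 12 it settles on (rxq:12 txq:12). */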

static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
    if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
        VLOG_WARN("Failed to enable flow control on device "DPDK_PORT_ID_FMT,
                  dev->port_id);
    }
}

static int
dpdk_eth_dev_init(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct rte_ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;
    uint32_t tx_tso_offload_capa = DPDK_TX_TSO_OFFLOAD_FLAGS;
    uint32_t rx_chksm_offload_capa = DEV_RX_OFFLOAD_UDP_CKSUM |
                                     DEV_RX_OFFLOAD_TCP_CKSUM |
                                     DEV_RX_OFFLOAD_IPV4_CKSUM;

    rte_eth_dev_info_get(dev->port_id, &info);

    if (strstr(info.driver_name, "vf") != NULL) {
        VLOG_INFO("Virtual function detected, HW_CRC_STRIP will be enabled");
        dev->hw_ol_features |= NETDEV_RX_HW_CRC_STRIP;
    } else {
        dev->hw_ol_features &= ~NETDEV_RX_HW_CRC_STRIP;
    }

    if ((info.rx_offload_capa & rx_chksm_offload_capa) !=
            rx_chksm_offload_capa) {
        VLOG_WARN("Rx checksum offload is not supported on port "
                  DPDK_PORT_ID_FMT, dev->port_id);
        dev->hw_ol_features &= ~NETDEV_RX_CHECKSUM_OFFLOAD;
    } else {
        dev->hw_ol_features |= NETDEV_RX_CHECKSUM_OFFLOAD;
    }

    if (info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER) {
        dev->hw_ol_features |= NETDEV_RX_HW_SCATTER;
    } else {
        /* Do not warn on lack of scatter support */
        dev->hw_ol_features &= ~NETDEV_RX_HW_SCATTER;
    }

    dev->hw_ol_features &= ~NETDEV_TX_TSO_OFFLOAD;
    if (userspace_tso_enabled()) {
        if ((info.tx_offload_capa & tx_tso_offload_capa)
            == tx_tso_offload_capa) {
            dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD;
            if (info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
                dev->hw_ol_features |= NETDEV_TX_SCTP_CHECKSUM_OFFLOAD;
            } else {
                VLOG_WARN("%s: Tx SCTP checksum offload is not supported, "
                          "SCTP packets sent to this device will be dropped",
                          netdev_get_name(&dev->up));
            }
        } else {
            VLOG_WARN("%s: Tx TSO offload is not supported.",
                      netdev_get_name(&dev->up));
        }
    }

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_port_config(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d lsc interrupt mode:%s) "
                 "configure error: %s",
                 dev->up.name, n_rxq, n_txq,
                 dev->lsc_interrupt_mode ? "true" : "false",
                 rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }
    dev->started = true;

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port "DPDK_PORT_ID_FMT": "ETH_ADDR_FMT,
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    dev = dpdk_rte_mzalloc(sizeof *dev);
    if (dev) {
        return &dev->up;
    }

    return NULL;
}

static struct dpdk_tx_queue *
netdev_dpdk_alloc_txq(unsigned int n_txqs)
{
    struct dpdk_tx_queue *txqs;
    unsigned i;

    txqs = dpdk_rte_mzalloc(n_txqs * sizeof *txqs);
    if (txqs) {
        for (i = 0; i < n_txqs; i++) {
            /* Initialize map for vhost devices. */
            txqs[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
            rte_spinlock_init(&txqs[i].tx_lock);
        }
    }

    return txqs;
}

static int
common_construct(struct netdev *netdev, dpdk_port_t port_no,
                 enum dpdk_dev_type type, int socket_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_init(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    dev->socket_id = socket_id < 0 ? SOCKET0 : socket_id;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;
    dev->requested_mtu = RTE_ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    dev->requested_lsc_interrupt_mode = 0;
    ovsrcu_index_init(&dev->vid, -1);
    dev->vhost_reconfigured = false;
    dev->attached = false;
    dev->started = false;
    dev->reset_needed = false;

    ovsrcu_init(&dev->qos_conf, NULL);

    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    dev->requested_n_rxq = NR_QUEUE;
    dev->requested_n_txq = NR_QUEUE;
    dev->requested_rxq_size = NIC_PORT_DEFAULT_RXQ_SIZE;
    dev->requested_txq_size = NIC_PORT_DEFAULT_TXQ_SIZE;

    /* Initialize the flow control to NULL */
    memset(&dev->fc_conf, 0, sizeof dev->fc_conf);

    /* Initialize the hardware offload flags to 0 */
    dev->hw_ol_features = 0;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    ovs_list_push_back(&dpdk_list, &dev->list_node);

    netdev_request_reconfigure(netdev);

    dev->rte_xstats_names = NULL;
    dev->rte_xstats_names_size = 0;

    dev->rte_xstats_ids = NULL;
    dev->rte_xstats_ids_size = 0;

    dev->sw_stats = xzalloc(sizeof *dev->sw_stats);
    dev->sw_stats->tx_retries = (dev->type == DPDK_DEV_VHOST) ? 0 : UINT64_MAX;

    return 0;
}

/* Get the number of OVS interfaces which have the same DPDK
 * rte device (e.g. same pci bus address).
 * FIXME: avoid direct access to DPDK internal array rte_eth_devices.
 */
static int
netdev_dpdk_get_num_ports(struct rte_device *device)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev;
    int count = 0;

    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (rte_eth_devices[dev->port_id].device == device
            && rte_eth_devices[dev->port_id].state != RTE_ETH_DEV_UNUSED) {
            count++;
        }
    }

    return count;
}

static int
vhost_common_construct(struct netdev *netdev)
    OVS_REQUIRES(dpdk_mutex)
{
    int socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    dev->vhost_rxq_enabled = dpdk_rte_mzalloc(OVS_VHOST_MAX_QUEUE_NUM *
                                              sizeof *dev->vhost_rxq_enabled);
    if (!dev->vhost_rxq_enabled) {
        return ENOMEM;
    }
    dev->tx_q = netdev_dpdk_alloc_txq(OVS_VHOST_MAX_QUEUE_NUM);
    if (!dev->tx_q) {
        rte_free(dev->vhost_rxq_enabled);
        return ENOMEM;
    }

    atomic_init(&dev->vhost_tx_retries_max, VHOST_ENQ_RETRY_DEF);

    return common_construct(netdev, DPDK_ETH_PORT_ID_INVALID,
                            DPDK_DEV_VHOST, socket_id);
}

static int
netdev_dpdk_vhost_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location where
     * the socket is to be created, then register the socket. */
    dev->vhost_id = xasprintf("%s/%s", dpdk_get_vhost_sock_dir(), name);

    dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;

    /* There is no support for multi-segments buffers. */
    dev->vhost_driver_flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;
    err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 dev->vhost_id);
        goto out;
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
    }

    err = rte_vhost_driver_callback_register(dev->vhost_id,
                                             &virtio_net_device_ops);
    if (err) {
        VLOG_ERR("rte_vhost_driver_callback_register failed for vhost user "
                 "port: %s\n", name);
        goto out;
    }

    if (!userspace_tso_enabled()) {
        err = rte_vhost_driver_disable_features(dev->vhost_id,
                                    1ULL << VIRTIO_NET_F_HOST_TSO4
                                    | 1ULL << VIRTIO_NET_F_HOST_TSO6
                                    | 1ULL << VIRTIO_NET_F_CSUM);
        if (err) {
            VLOG_ERR("rte_vhost_driver_disable_features failed for vhost user "
                     "port: %s\n", name);
            goto out;
        }
    }

    err = rte_vhost_driver_start(dev->vhost_id);
    if (err) {
        VLOG_ERR("rte_vhost_driver_start failed for vhost user "
                 "port: %s\n", name);
        goto out;
    }

    err = vhost_common_construct(netdev);
    if (err) {
        VLOG_ERR("vhost_common_construct failed for vhost user "
                 "port: %s\n", name);
    }

out:
    if (err) {
        free(dev->vhost_id);
        dev->vhost_id = NULL;
    }

    ovs_mutex_unlock(&dpdk_mutex);
    VLOG_WARN_ONCE("dpdkvhostuser ports are considered deprecated; "
                   "please migrate to dpdkvhostuserclient ports.");

    return err;
}

static int
netdev_dpdk_vhost_client_construct(struct netdev *netdev)
{
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    err = vhost_common_construct(netdev);
    if (err) {
        VLOG_ERR("vhost_common_construct failed for vhost user client "
                 "port: %s\n", netdev->name);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static int
netdev_dpdk_construct(struct netdev *netdev)
{
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    err = common_construct(netdev, DPDK_ETH_PORT_ID_INVALID,
                           DPDK_DEV_ETH, SOCKET0);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static void
common_destruct(struct netdev_dpdk *dev)
    OVS_REQUIRES(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    rte_free(dev->tx_q);
    dpdk_mp_put(dev->dpdk_mp);

    ovs_list_remove(&dev->list_node);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));
    free(dev->sw_stats);
    ovs_mutex_destroy(&dev->mutex);
}

static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_device *rte_dev;
    struct rte_eth_dev *eth_dev;

    ovs_mutex_lock(&dpdk_mutex);

    rte_eth_dev_stop(dev->port_id);
    dev->started = false;

    if (dev->attached) {
        /* Retrieve eth device data before closing it.
         * FIXME: avoid direct access to DPDK internal array rte_eth_devices.
         */
        eth_dev = &rte_eth_devices[dev->port_id];
        rte_dev = eth_dev->device;

        /* Remove the eth device. */
        rte_eth_dev_close(dev->port_id);

        /* Remove this rte device and all its eth devices if all the eth
         * devices belonging to the rte device are closed.
         */
        if (!netdev_dpdk_get_num_ports(rte_dev)) {
            int ret = rte_dev_remove(rte_dev);

            if (ret < 0) {
                VLOG_ERR("Device '%s' can not be detached: %s.",
                         dev->devargs, rte_strerror(-ret));
            } else {
                /* Device was closed and detached. */
                VLOG_INFO("Device '%s' has been removed and detached",
                          dev->devargs);
            }
        } else {
            /* Device was only closed. rte_dev_remove() was not called. */
            VLOG_INFO("Device '%s' has been removed", dev->devargs);
        }
    }

    netdev_dpdk_clear_xstats(dev);
    free(dev->devargs);
    common_destruct(dev);

    ovs_mutex_unlock(&dpdk_mutex);
}

/* rte_vhost_driver_unregister() can call back destroy_device(), which will
 * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'. To avoid a
 * deadlock, none of the mutexes must be held while calling this function. */
static int
dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
                             char *vhost_id)
    OVS_EXCLUDED(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    return rte_vhost_driver_unregister(vhost_id);
}

static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    char *vhost_id;

    ovs_mutex_lock(&dpdk_mutex);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_vid(dev) >= 0
        && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.", dev->vhost_id);
    }

    vhost_id = dev->vhost_id;
    dev->vhost_id = NULL;
    rte_free(dev->vhost_rxq_enabled);

    common_destruct(dev);

    ovs_mutex_unlock(&dpdk_mutex);

    if (!vhost_id) {
        goto out;
    }

    if (dpdk_vhost_driver_unregister(dev, vhost_id)) {
        VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n",
                 netdev->name, vhost_id);
    } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        /* OVS server mode - remove this socket from list for deletion */
        fatal_signal_remove_file_to_unlink(vhost_id);
    }

out:
    free(vhost_id);
}

static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}

static void
netdev_dpdk_clear_xstats(struct netdev_dpdk *dev)
{
    /* If statistics are already allocated, we have to
     * reconfigure, as port_id could have been changed. */
    if (dev->rte_xstats_names) {
        free(dev->rte_xstats_names);
        dev->rte_xstats_names = NULL;
        dev->rte_xstats_names_size = 0;
    }
    if (dev->rte_xstats_ids) {
        free(dev->rte_xstats_ids);
        dev->rte_xstats_ids = NULL;
        dev->rte_xstats_ids_size = 0;
    }
}

static const char *
netdev_dpdk_get_xstat_name(struct netdev_dpdk *dev, uint64_t id)
{
    if (id >= dev->rte_xstats_names_size) {
        return "UNKNOWN";
    }

    return dev->rte_xstats_names[id].name;
}

static void
netdev_dpdk_configure_xstats(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_eth_xstat *rte_xstats;
    int rte_xstats_len;
    uint64_t id;
    int xstats_no;
    const char *name;
    bool ret;

    /* Retrieving all XSTATS names. If something will go wrong
     * or amount of counters will be equal 0, rte_xstats_names
     * buffer will be marked as NULL, and any further xstats
     * query won't be performed (e.g. during netdev_dpdk_get_stats
     * execution). */

    ret = false;
    xstats_no = 0;

    if (dev->rte_xstats_names == NULL || dev->rte_xstats_ids == NULL) {
        dev->rte_xstats_names_size =
                rte_eth_xstats_get_names(dev->port_id, NULL, 0);

        if (dev->rte_xstats_names_size < 0) {
            VLOG_WARN("Cannot get XSTATS for port: "DPDK_PORT_ID_FMT,
                      dev->port_id);
            dev->rte_xstats_names_size = 0;
        } else {
            /* Reserve memory for xstats names and values */
            dev->rte_xstats_names = xcalloc(dev->rte_xstats_names_size,
                                            sizeof *dev->rte_xstats_names);

            if (dev->rte_xstats_names) {
                /* Retrieve xstats names */
                rte_xstats_len =
                        rte_eth_xstats_get_names(dev->port_id,
                                                 dev->rte_xstats_names,
                                                 dev->rte_xstats_names_size);
                if (rte_xstats_len < 0) {
                    VLOG_WARN("Cannot get XSTATS names for port: "
                              DPDK_PORT_ID_FMT, dev->port_id);
                    goto out;
                } else if (rte_xstats_len != dev->rte_xstats_names_size) {
                    VLOG_WARN("XSTATS size doesn't match for port: "
                              DPDK_PORT_ID_FMT, dev->port_id);
                    goto out;
                }

                dev->rte_xstats_ids = xcalloc(dev->rte_xstats_names_size,
                                              sizeof(uint64_t));

                /* We have to calculate number of counters */
                rte_xstats = xmalloc(rte_xstats_len * sizeof *rte_xstats);
                memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);

                /* Retrieve xstats values */
                if (rte_eth_xstats_get(dev->port_id, rte_xstats,
                                       rte_xstats_len) > 0) {
                    dev->rte_xstats_ids_size = 0;

                    for (uint32_t i = 0; i < rte_xstats_len; i++) {
                        id = rte_xstats[i].id;
                        name = netdev_dpdk_get_xstat_name(dev, id);

                        /* We need to filter out everything except
                         * dropped, error and management counters */
                        if (string_ends_with(name, "_errors") ||
                            strstr(name, "_management_") ||
                            string_ends_with(name, "_dropped")) {

                            dev->rte_xstats_ids[xstats_no] = id;
                            xstats_no++;
                        }
                    }
                    dev->rte_xstats_ids_size = xstats_no;
                    ret = true;
                } else {
                    VLOG_WARN("Can't get XSTATS IDs for port: "
                              DPDK_PORT_ID_FMT, dev->port_id);
                }

                free(rte_xstats);
            }
        }
    } else {
        /* Already configured */
        ret = true;
    }

out:
    if (!ret) {
        netdev_dpdk_clear_xstats(dev);
    }
}

static bool
dpdk_port_is_representor(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(dev->port_id, &dev_info);

    return (*dev_info.dev_flags) & RTE_ETH_DEV_REPRESENTOR;
}

static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "mtu", "%d", dev->mtu);

    if (dev->type == DPDK_DEV_ETH) {
        smap_add_format(args, "requested_rxq_descriptors", "%d",
                        dev->requested_rxq_size);
        smap_add_format(args, "configured_rxq_descriptors", "%d",
                        dev->rxq_size);
        smap_add_format(args, "requested_txq_descriptors", "%d",
                        dev->requested_txq_size);
        smap_add_format(args, "configured_txq_descriptors", "%d",
                        dev->txq_size);
        if (dev->hw_ol_features & NETDEV_RX_CHECKSUM_OFFLOAD) {
            smap_add(args, "rx_csum_offload", "true");
        } else {
            smap_add(args, "rx_csum_offload", "false");
        }
        if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
            smap_add(args, "tx_tso_offload", "true");
        } else {
            smap_add(args, "tx_tso_offload", "false");
        }
        smap_add(args, "lsc_interrupt_mode",
                 dev->lsc_interrupt_mode ? "true" : "false");

        if (dpdk_port_is_representor(dev)) {
            smap_add_format(args, "dpdk-vf-mac", ETH_ADDR_FMT,
                            ETH_ADDR_ARGS(dev->requested_hwaddr));
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static struct netdev_dpdk *
netdev_dpdk_lookup_by_port_id(dpdk_port_t port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev;

    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (dev->port_id == port_id) {
            return dev;
        }
    }

    return NULL;
}

static dpdk_port_t
netdev_dpdk_get_port_by_mac(const char *mac_str)
{
    dpdk_port_t port_id;
    struct eth_addr mac, port_mac;

    if (!eth_addr_from_string(mac_str, &mac)) {
        VLOG_ERR("invalid mac: %s", mac_str);
        return DPDK_ETH_PORT_ID_INVALID;
    }

    RTE_ETH_FOREACH_DEV (port_id) {
        struct rte_ether_addr ea;

        rte_eth_macaddr_get(port_id, &ea);
        memcpy(port_mac.ea, ea.addr_bytes, ETH_ADDR_LEN);
        if (eth_addr_equals(mac, port_mac)) {
            return port_id;
        }
    }

    return DPDK_ETH_PORT_ID_INVALID;
}

/* Return the first DPDK port id matching the devargs pattern. */
static dpdk_port_t
netdev_dpdk_get_port_by_devargs(const char *devargs)
    OVS_REQUIRES(dpdk_mutex)
{
    dpdk_port_t port_id;
    struct rte_dev_iterator iterator;

    RTE_ETH_FOREACH_MATCHING_DEV (port_id, devargs, &iterator) {
        /* If a break is done - must call rte_eth_iterator_cleanup. */
        rte_eth_iterator_cleanup(&iterator);
        break;
    }

    return port_id;
}

/*
 * Normally, a PCI id (optionally followed by a representor number)
 * is enough for identifying a specific DPDK port.
 * However, for some NICs with multiple ports sharing the same PCI
 * id, using the PCI id alone won't work.
 *
 * To fix that, here one more method is introduced: "class=eth,mac=$MAC".
 *
 * Note that the compatibility is fully kept: user can still use the
 * PCI id for adding ports (when it's enough for them).
 */
static dpdk_port_t
netdev_dpdk_process_devargs(struct netdev_dpdk *dev,
                            const char *devargs, char **errp)
    OVS_REQUIRES(dpdk_mutex)
{
    dpdk_port_t new_port_id;

    if (strncmp(devargs, "class=eth,mac=", 14) == 0) {
        new_port_id = netdev_dpdk_get_port_by_mac(&devargs[14]);
    } else {
        new_port_id = netdev_dpdk_get_port_by_devargs(devargs);
        if (!rte_eth_dev_is_valid_port(new_port_id)) {
            /* Device not found in DPDK, attempt to attach it */
            if (rte_dev_probe(devargs)) {
                new_port_id = DPDK_ETH_PORT_ID_INVALID;
            } else {
                new_port_id = netdev_dpdk_get_port_by_devargs(devargs);
                if (rte_eth_dev_is_valid_port(new_port_id)) {
                    /* Attach successful */
                    dev->attached = true;
                    VLOG_INFO("Device '%s' attached to DPDK", devargs);
                } else {
                    /* Attach unsuccessful */
                    new_port_id = DPDK_ETH_PORT_ID_INVALID;
                }
            }
        }
    }

    if (new_port_id == DPDK_ETH_PORT_ID_INVALID) {
        VLOG_WARN_BUF(errp, "Error attaching device '%s' to DPDK", devargs);
    }

    return new_port_id;
}
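
/* Illustrative devargs accepted by netdev_dpdk_process_devargs(): a PCI
 * address such as "0000:08:00.1" (optionally with a representor suffix,
 * e.g. "0000:08:00.1,representor=[2]"), or the MAC-based form
 * "class=eth,mac=00:11:22:33:44:55" for NICs whose ports share one PCI
 * id. */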

static int
dpdk_eth_event_callback(dpdk_port_t port_id, enum rte_eth_event_type type,
                        void *param OVS_UNUSED, void *ret_param OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    switch ((int) type) {
    case RTE_ETH_EVENT_INTR_RESET:
        ovs_mutex_lock(&dpdk_mutex);
        dev = netdev_dpdk_lookup_by_port_id(port_id);
        if (dev) {
            ovs_mutex_lock(&dev->mutex);
            dev->reset_needed = true;
            netdev_request_reconfigure(&dev->up);
            VLOG_DBG_RL(&rl, "%s: Device reset requested.",
                        netdev_get_name(&dev->up));
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        break;

    default:
        /* Ignore all other types. */
        break;
    }

    return 0;
}

static void
dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args)
    OVS_REQUIRES(dev->mutex)
{
    int new_n_rxq;

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", NR_QUEUE), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(&dev->up);
    }
}

static void
dpdk_process_queue_size(struct netdev *netdev, const struct smap *args,
                        const char *flag, int default_size, int *new_size)
{
    int queue_size = smap_get_int(args, flag, default_size);

    if (queue_size <= 0 || queue_size > NIC_PORT_MAX_Q_SIZE
        || !is_pow2(queue_size)) {
        queue_size = default_size;
    }

    if (queue_size != *new_size) {
        *new_size = queue_size;
        netdev_request_reconfigure(netdev);
    }
}
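
/* Example (illustrative): with flag "n_rxq_desc" and a default of 2048, a
 * requested value of 1024 is accepted, while 0, 1000 (not a power of two)
 * or 8192 (above NIC_PORT_MAX_Q_SIZE) all fall back to the 2048 default. */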

static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args,
                       char **errp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    bool rx_fc_en, tx_fc_en, autoneg, lsc_interrupt_mode;
    bool flow_control_requested = true;
    enum rte_eth_fc_mode fc_mode;
    static const enum rte_eth_fc_mode fc_mode_set[2][2] = {
        {RTE_FC_NONE,     RTE_FC_TX_PAUSE},
        {RTE_FC_RX_PAUSE, RTE_FC_FULL    }
    };
    const char *new_devargs;
    const char *vf_mac;
    int err = 0;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    dpdk_set_rxq_config(dev, args);

    dpdk_process_queue_size(netdev, args, "n_rxq_desc",
                            NIC_PORT_DEFAULT_RXQ_SIZE,
                            &dev->requested_rxq_size);
    dpdk_process_queue_size(netdev, args, "n_txq_desc",
                            NIC_PORT_DEFAULT_TXQ_SIZE,
                            &dev->requested_txq_size);

    new_devargs = smap_get(args, "dpdk-devargs");

    if (dev->devargs && new_devargs && strcmp(new_devargs, dev->devargs)) {
        /* The user requested a new device. If we return error, the caller
         * will delete this netdev and try to recreate it. */
        err = EAGAIN;
        goto out;
    }

    /* dpdk-devargs is required for device configuration */
    if (new_devargs && new_devargs[0]) {
        /* Don't process dpdk-devargs if value is unchanged and port id
         * is valid. */
        if (!(dev->devargs && !strcmp(dev->devargs, new_devargs)
              && rte_eth_dev_is_valid_port(dev->port_id))) {
            dpdk_port_t new_port_id = netdev_dpdk_process_devargs(dev,
                                                                  new_devargs,
                                                                  errp);
            if (!rte_eth_dev_is_valid_port(new_port_id)) {
                err = EINVAL;
            } else if (new_port_id == dev->port_id) {
                /* Already configured, do not reconfigure again */
                err = 0;
            } else {
                struct netdev_dpdk *dup_dev;

                dup_dev = netdev_dpdk_lookup_by_port_id(new_port_id);
                if (dup_dev) {
                    VLOG_WARN_BUF(errp, "'%s' is trying to use device '%s' "
                                  "which is already in use by '%s'",
                                  netdev_get_name(netdev), new_devargs,
                                  netdev_get_name(&dup_dev->up));
                    err = EADDRINUSE;
                } else {
                    int sid = rte_eth_dev_socket_id(new_port_id);

                    dev->requested_socket_id = sid < 0 ? SOCKET0 : sid;
                    dev->devargs = xstrdup(new_devargs);
                    dev->port_id = new_port_id;
                    netdev_request_reconfigure(&dev->up);
                    netdev_dpdk_clear_xstats(dev);
                    err = 0;
                }
            }
        }
    } else {
        VLOG_WARN_BUF(errp, "'%s' is missing 'options:dpdk-devargs'. "
                      "The old 'dpdk<port_id>' names are not supported",
                      netdev_get_name(netdev));
        err = EINVAL;
    }

    if (err) {
        goto out;
    }

    vf_mac = smap_get(args, "dpdk-vf-mac");
    if (vf_mac) {
        struct eth_addr mac;

        if (!dpdk_port_is_representor(dev)) {
            VLOG_WARN_BUF(errp, "'%s' is trying to set the VF MAC '%s' "
                          "but 'options:dpdk-vf-mac' is only supported for "
                          "VF representors",
                          netdev_get_name(netdev), vf_mac);
        } else if (!eth_addr_from_string(vf_mac, &mac)) {
            VLOG_WARN_BUF(errp, "interface '%s': cannot parse VF MAC '%s'.",
                          netdev_get_name(netdev), vf_mac);
        } else if (eth_addr_is_multicast(mac)) {
            VLOG_WARN_BUF(errp,
                          "interface '%s': cannot set VF MAC to multicast "
                          "address '%s'.", netdev_get_name(netdev), vf_mac);
        } else if (!eth_addr_equals(dev->requested_hwaddr, mac)) {
            dev->requested_hwaddr = mac;
            netdev_request_reconfigure(netdev);
        }
    }

    lsc_interrupt_mode = smap_get_bool(args, "dpdk-lsc-interrupt", false);
    if (dev->requested_lsc_interrupt_mode != lsc_interrupt_mode) {
        dev->requested_lsc_interrupt_mode = lsc_interrupt_mode;
        netdev_request_reconfigure(netdev);
    }

    rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
    tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
    autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);

    fc_mode = fc_mode_set[tx_fc_en][rx_fc_en];

    if (!smap_get(args, "rx-flow-ctrl") && !smap_get(args, "tx-flow-ctrl")
        && !smap_get(args, "flow-ctrl-autoneg")) {
        /* FIXME: User didn't ask for flow control configuration.
         * For now we'll not print a warning if flow control is not
         * supported by the DPDK port. */
        flow_control_requested = false;
    }

    /* Get the Flow control configuration. */
    err = -rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
    if (err) {
        if (err == ENOTSUP) {
            if (flow_control_requested) {
                VLOG_WARN("%s: Flow control is not supported.",
                          netdev_get_name(netdev));
            }
            err = 0; /* Not fatal. */
        } else {
            VLOG_WARN("%s: Cannot get flow control parameters: %s",
                      netdev_get_name(netdev), rte_strerror(err));
        }
        goto out;
    }

    if (dev->fc_conf.mode != fc_mode || autoneg != dev->fc_conf.autoneg) {
        dev->fc_conf.mode = fc_mode;
        dev->fc_conf.autoneg = autoneg;
        dpdk_eth_flow_ctrl_setup(dev);
    }

out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
static int
netdev_dpdk_vhost_client_set_config(struct netdev *netdev,
                                    const struct smap *args,
                                    char **errp OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *path;
    int max_tx_retries, cur_max_tx_retries;

    ovs_mutex_lock(&dev->mutex);
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        path = smap_get(args, "vhost-server-path");
        if (!nullable_string_is_equal(path, dev->vhost_id)) {
            free(dev->vhost_id);
            dev->vhost_id = nullable_xstrdup(path);
            netdev_request_reconfigure(netdev);
        }
    }

    max_tx_retries = smap_get_int(args, "tx-retries-max",
                                  VHOST_ENQ_RETRY_DEF);
    if (max_tx_retries < VHOST_ENQ_RETRY_MIN
        || max_tx_retries > VHOST_ENQ_RETRY_MAX) {
        max_tx_retries = VHOST_ENQ_RETRY_DEF;
    }
    atomic_read_relaxed(&dev->vhost_tx_retries_max, &cur_max_tx_retries);
    if (max_tx_retries != cur_max_tx_retries) {
        atomic_store_relaxed(&dev->vhost_tx_retries_max, max_tx_retries);
        VLOG_INFO("Max Tx retries for vhost device '%s' set to %d",
                  netdev_get_name(netdev), max_tx_retries);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}

/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    if (rx) {
        return &rx->up;
    }

    return NULL;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}

static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
/* Prepare the packet for HWOL.
 * Return True if the packet is OK to continue. */
static bool
netdev_dpdk_prep_hwol_packet(struct netdev_dpdk *dev, struct rte_mbuf *mbuf)
{
    struct dp_packet *pkt = CONTAINER_OF(mbuf, struct dp_packet, mbuf);

    if (mbuf->ol_flags & PKT_TX_L4_MASK) {
        mbuf->l2_len = (char *) dp_packet_l3(pkt) - (char *) dp_packet_eth(pkt);
        mbuf->l3_len = (char *) dp_packet_l4(pkt) - (char *) dp_packet_l3(pkt);
        mbuf->outer_l2_len = 0;
        mbuf->outer_l3_len = 0;
    }

    if (mbuf->ol_flags & PKT_TX_TCP_SEG) {
        struct tcp_header *th = dp_packet_l4(pkt);

        if (!th) {
            VLOG_WARN_RL(&rl, "%s: TCP Segmentation without L4 header"
                         " pkt len: %"PRIu32"", dev->up.name, mbuf->pkt_len);
            return false;
        }

        mbuf->l4_len = TCP_OFFSET(th->tcp_ctl) * 4;
        mbuf->ol_flags |= PKT_TX_TCP_CKSUM;
        mbuf->tso_segsz = dev->mtu - mbuf->l3_len - mbuf->l4_len;

        if (mbuf->ol_flags & PKT_TX_IPV4) {
            mbuf->ol_flags |= PKT_TX_IP_CKSUM;
        }
    }
    return true;
}
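
/* Worked example (illustrative only, assuming a typical IPv4/TCP packet
 * with no IP or TCP options): with dev->mtu == 1500, l3_len == 20 and
 * l4_len == 20, the tso_segsz computed above is 1500 - 20 - 20 == 1460
 * bytes of TCP payload per segment emitted by the NIC. */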
/* Prepare a batch for HWOL.
 * Return the number of good packets in the batch. */
static int
netdev_dpdk_prep_hwol_batch(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                            int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt;

    /* Prepare and filter bad HWOL packets. */
    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        if (!netdev_dpdk_prep_hwol_packet(dev, pkt)) {
            rte_pktmbuf_free(pkt);
            continue;
        }

        if (OVS_UNLIKELY(i != cnt)) {
            pkts[cnt] = pkt;
        }
        cnt++;
    }

    return cnt;
}
/* Tries to transmit 'pkts' to txq 'qid' of device 'dev'.  Takes ownership of
 * 'pkts', even in case of failure.
 *
 * Returns the number of packets that weren't transmitted. */
static inline int
netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
                         struct rte_mbuf **pkts, int cnt)
{
    uint32_t nb_tx = 0;
    uint16_t nb_tx_prep = cnt;

    if (userspace_tso_enabled()) {
        nb_tx_prep = rte_eth_tx_prepare(dev->port_id, qid, pkts, cnt);
        if (nb_tx_prep != cnt) {
            VLOG_WARN_RL(&rl, "%s: Output batch contains invalid packets. "
                         "Only %u/%u are valid: %s", dev->up.name, nb_tx_prep,
                         cnt, rte_strerror(rte_errno));
        }
    }

    while (nb_tx != nb_tx_prep) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx,
                               nb_tx_prep - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != cnt)) {
        /* Free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < cnt; i++) {
            rte_pktmbuf_free(pkts[i]);
        }
    }

    return cnt - nb_tx;
}
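
/* Usage note (illustrative): the value returned above is 'cnt - nb_tx',
 * i.e. the number of packets the driver never accepted, so a caller that
 * wants a drop count can simply do, e.g.:
 *
 *     dropped = netdev_dpdk_eth_tx_burst(dev, qid, pkts, cnt);
 *
 * with no further cleanup, since the unsent mbufs are freed here. */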
static inline bool
netdev_dpdk_srtcm_policer_pkt_handle(struct rte_meter_srtcm *meter,
                                     struct rte_meter_srtcm_profile *profile,
                                     struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct rte_ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, profile, time, pkt_len) ==
                                             RTE_COLOR_GREEN;
}

static int
srtcm_policer_run_single_packet(struct rte_meter_srtcm *meter,
                                struct rte_meter_srtcm_profile *profile,
                                struct rte_mbuf **pkts, int pkt_cnt,
                                bool should_steal)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet */
        if (netdev_dpdk_srtcm_policer_pkt_handle(meter, profile,
                                                 pkt, current_time)) {
            if (OVS_UNLIKELY(i != cnt)) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            if (should_steal) {
                rte_pktmbuf_free(pkt);
            }
        }
    }

    return cnt;
}
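
/* Note (illustrative): the policer meters the L3 rate, so the Ethernet
 * header is excluded from the accounted length above.  For example, a
 * 1518-byte frame is metered as 1518 - sizeof(struct rte_ether_hdr)
 * == 1504 bytes. */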
static int
ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
                    int pkt_cnt, bool should_steal)
{
    int cnt = 0;

    rte_spinlock_lock(&policer->policer_lock);
    cnt = srtcm_policer_run_single_packet(&policer->in_policer,
                                          &policer->in_prof,
                                          pkts, pkt_cnt, should_steal);
    rte_spinlock_unlock(&policer->policer_lock);

    return cnt;
}

static bool
is_vhost_running(struct netdev_dpdk *dev)
{
    return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
}
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
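
/* Bucket examples (illustrative): a 64-byte packet lands in
 * rx_1_to_64_packets, a 100-byte packet in rx_65_to_127_packets, a standard
 * 1514-byte Ethernet frame in rx_1024_to_1522_packets, and a 9000-byte
 * jumbo frame in rx_1523_to_max_packets. */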
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_dpdk *dev,
                                     struct dp_packet **packets, int count,
                                     int qos_drops)
{
    struct netdev_stats *stats = &dev->stats;
    struct dp_packet *packet;
    unsigned int packet_size;
    int i;

    stats->rx_packets += count;
    stats->rx_dropped += qos_drops;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }

    if (OVS_UNLIKELY(qos_drops)) {
        dev->sw_stats->rx_qos_drops += qos_drops;
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet_batch *batch, int *qfill)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t nb_rx = 0;
    uint16_t qos_drops = 0;
    int qid = rxq->queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
    int vid = netdev_dpdk_get_vid(dev);

    if (OVS_UNLIKELY(vid < 0 || !dev->vhost_reconfigured
                     || !(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(vid, qid, dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) batch->packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (qfill) {
        if (nb_rx == NETDEV_MAX_BURST) {
            /* The DPDK API returns a uint32_t which often has invalid bits in
             * the upper 16-bits. Need to restrict the value to uint16_t. */
            *qfill = rte_vhost_rx_queue_count(vid, qid) & UINT16_MAX;
        } else {
            *qfill = 0;
        }
    }

    if (policer) {
        qos_drops = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx, true);
        qos_drops -= nb_rx;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(dev, batch->packets,
                                         nb_rx, qos_drops);
    rte_spinlock_unlock(&dev->stats_lock);

    batch->count = nb_rx;
    dp_packet_batch_init_packet_fields(batch);

    return 0;
}
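
/* Virtqueue numbering example (illustrative): virtio pairs queues as
 * {RX, TX}, so OVS rx queue N maps to guest vring
 * N * VIRTIO_QNUM + VIRTIO_TXQ above.  E.g. rx queue 1 dequeues from guest
 * vring 3, which is the guest's second TX ring. */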
static bool
netdev_dpdk_vhost_rxq_enabled(struct netdev_rxq *rxq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    return dev->vhost_rxq_enabled[rxq->queue_id];
}

static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch,
                     int *qfill)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    int nb_rx;
    int dropped = 0;

    if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) batch->packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx, true);
        dropped -= nb_rx;
    }

    /* Update stats to reflect dropped packets */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        dev->sw_stats->rx_qos_drops += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    batch->count = nb_rx;
    dp_packet_batch_init_packet_fields(batch);

    if (qfill) {
        if (nb_rx == NETDEV_MAX_BURST) {
            *qfill = rte_eth_rx_queue_count(rx->port_id, rxq->queue_id);
        } else {
            *qfill = 0;
        }
    }

    return 0;
}
static inline int
netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                    int cnt, bool should_steal)
{
    struct qos_conf *qos_conf = ovsrcu_get(struct qos_conf *, &dev->qos_conf);

    if (qos_conf) {
        rte_spinlock_lock(&qos_conf->lock);
        cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt, should_steal);
        rte_spinlock_unlock(&qos_conf->lock);
    }

    return cnt;
}
static int
netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                              int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt;

    /* Filter oversized packets, unless they are marked for TSO. */
    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        if (OVS_UNLIKELY((pkt->pkt_len > dev->max_packet_len)
            && !(pkt->ol_flags & PKT_TX_TCP_SEG))) {
            VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " "
                         "max_packet_len %d", dev->up.name, pkt->pkt_len,
                         dev->max_packet_len);
            rte_pktmbuf_free(pkt);
            continue;
        }

        if (OVS_UNLIKELY(i != cnt)) {
            pkts[cnt] = pkt;
        }
        cnt++;
    }

    return cnt;
}
static void
netdev_dpdk_vhost_update_tx_counters(struct netdev_dpdk *dev,
                                     struct dp_packet **packets,
                                     int attempted,
                                     struct netdev_dpdk_sw_stats *sw_stats_add)
{
    int dropped = sw_stats_add->tx_mtu_exceeded_drops +
                  sw_stats_add->tx_qos_drops +
                  sw_stats_add->tx_failure_drops +
                  sw_stats_add->tx_invalid_hwol_drops;
    struct netdev_stats *stats = &dev->stats;
    int sent = attempted - dropped;
    int i;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }

    if (OVS_UNLIKELY(dropped || sw_stats_add->tx_retries)) {
        struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;

        sw_stats->tx_retries            += sw_stats_add->tx_retries;
        sw_stats->tx_failure_drops      += sw_stats_add->tx_failure_drops;
        sw_stats->tx_mtu_exceeded_drops += sw_stats_add->tx_mtu_exceeded_drops;
        sw_stats->tx_qos_drops          += sw_stats_add->tx_qos_drops;
        sw_stats->tx_invalid_hwol_drops += sw_stats_add->tx_invalid_hwol_drops;
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    struct netdev_dpdk_sw_stats sw_stats_add;
    unsigned int n_packets_to_free = cnt;
    unsigned int total_packets = cnt;
    int i, retries = 0;
    int max_retries = VHOST_ENQ_RETRY_MIN;
    int vid = netdev_dpdk_get_vid(dev);

    qid = dev->tx_q[qid % netdev->n_txq].map;

    if (OVS_UNLIKELY(vid < 0 || !dev->vhost_reconfigured || qid < 0
                     || !(dev->flags & NETDEV_UP))) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
        goto out;
    }

    if (OVS_UNLIKELY(!rte_spinlock_trylock(&dev->tx_q[qid].tx_lock))) {
        COVERAGE_INC(vhost_tx_contention);
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    sw_stats_add.tx_invalid_hwol_drops = cnt;
    if (userspace_tso_enabled()) {
        cnt = netdev_dpdk_prep_hwol_batch(dev, cur_pkts, cnt);
    }

    sw_stats_add.tx_invalid_hwol_drops -= cnt;
    sw_stats_add.tx_mtu_exceeded_drops = cnt;
    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    sw_stats_add.tx_mtu_exceeded_drops -= cnt;

    /* Check if QoS has been configured for the netdev. */
    sw_stats_add.tx_qos_drops = cnt;
    cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt, true);
    sw_stats_add.tx_qos_drops -= cnt;

    n_packets_to_free = cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(vid, vhost_qid, cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible retry.*/
            cur_pkts = &cur_pkts[tx_pkts];
            if (OVS_UNLIKELY(cnt && !retries)) {
                /*
                 * Read max retries as there are packets not sent
                 * and no retries have already occurred.
                 */
                atomic_read_relaxed(&dev->vhost_tx_retries_max, &max_retries);
            }
        } else {
            /* No packets sent - do not retry.*/
            break;
        }
    } while (cnt && (retries++ < max_retries));

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    sw_stats_add.tx_failure_drops = cnt;
    sw_stats_add.tx_retries = MIN(retries, max_retries);

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(dev, pkts, total_packets,
                                         &sw_stats_add);
    rte_spinlock_unlock(&dev->stats_lock);

out:
    for (i = 0; i < n_packets_to_free; i++) {
        dp_packet_delete(pkts[i]);
    }
}
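
/* Virtqueue numbering example (illustrative): the TX path mirrors the RX
 * path; OVS tx queue N enqueues to guest vring N * VIRTIO_QNUM + VIRTIO_RXQ
 * above, e.g. tx queue 1 feeds guest vring 2, the guest's second RX ring. */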
static void
netdev_dpdk_extbuf_free(void *addr OVS_UNUSED, void *opaque)
{
    rte_free(opaque);
}

static struct rte_mbuf *
dpdk_pktmbuf_attach_extbuf(struct rte_mbuf *pkt, uint32_t data_len)
{
    uint32_t total_len = RTE_PKTMBUF_HEADROOM + data_len;
    struct rte_mbuf_ext_shared_info *shinfo = NULL;
    uint16_t buf_len;
    void *buf;

    total_len += sizeof *shinfo + sizeof(uintptr_t);
    total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));

    if (OVS_UNLIKELY(total_len > UINT16_MAX)) {
        VLOG_ERR("Can't copy packet: too big %u", total_len);
        return NULL;
    }

    buf_len = total_len;
    buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
    if (OVS_UNLIKELY(buf == NULL)) {
        VLOG_ERR("Failed to allocate memory using rte_malloc: %u", buf_len);
        return NULL;
    }

    /* Initialize shinfo. */
    shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
                                                netdev_dpdk_extbuf_free,
                                                buf);
    if (OVS_UNLIKELY(shinfo == NULL)) {
        rte_free(buf);
        VLOG_ERR("Failed to initialize shared info for mbuf while "
                 "attempting to attach an external buffer.");
        return NULL;
    }

    rte_pktmbuf_attach_extbuf(pkt, buf, rte_malloc_virt2iova(buf), buf_len,
                              shinfo);
    rte_pktmbuf_reset_headroom(pkt);

    return pkt;
}
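
/* Buffer layout sketch (illustrative):
 *
 *   +----------------------+----------+--------------------------+
 *   | RTE_PKTMBUF_HEADROOM | data_len | shinfo (+ alignment pad) |
 *   +----------------------+----------+--------------------------+
 *
 * rte_pktmbuf_ext_shinfo_init_helper() carves the shared-info structure
 * out of the tail of the allocation and shrinks buf_len accordingly, so
 * the attached mbuf sees only the headroom and data region. */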
static struct rte_mbuf *
dpdk_pktmbuf_alloc(struct rte_mempool *mp, uint32_t data_len)
{
    struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);

    if (OVS_UNLIKELY(!pkt)) {
        return NULL;
    }

    if (rte_pktmbuf_tailroom(pkt) >= data_len) {
        return pkt;
    }

    if (dpdk_pktmbuf_attach_extbuf(pkt, data_len)) {
        return pkt;
    }

    rte_pktmbuf_free(pkt);

    return NULL;
}
static struct dp_packet *
dpdk_copy_dp_packet_to_mbuf(struct rte_mempool *mp, struct dp_packet *pkt_orig)
{
    struct rte_mbuf *mbuf_dest;
    struct dp_packet *pkt_dest;
    uint32_t pkt_len;

    pkt_len = dp_packet_size(pkt_orig);
    mbuf_dest = dpdk_pktmbuf_alloc(mp, pkt_len);
    if (OVS_UNLIKELY(mbuf_dest == NULL)) {
        return NULL;
    }

    pkt_dest = CONTAINER_OF(mbuf_dest, struct dp_packet, mbuf);
    memcpy(dp_packet_data(pkt_dest), dp_packet_data(pkt_orig), pkt_len);
    dp_packet_set_size(pkt_dest, pkt_len);

    mbuf_dest->tx_offload = pkt_orig->mbuf.tx_offload;
    mbuf_dest->packet_type = pkt_orig->mbuf.packet_type;
    mbuf_dest->ol_flags |= (pkt_orig->mbuf.ol_flags &
                            ~(EXT_ATTACHED_MBUF | IND_ATTACHED_MBUF));

    memcpy(&pkt_dest->l2_pad_size, &pkt_orig->l2_pad_size,
           sizeof(struct dp_packet) - offsetof(struct dp_packet, l2_pad_size));

    if (mbuf_dest->ol_flags & PKT_TX_L4_MASK) {
        mbuf_dest->l2_len = (char *) dp_packet_l3(pkt_dest)
                                - (char *) dp_packet_eth(pkt_dest);
        mbuf_dest->l3_len = (char *) dp_packet_l4(pkt_dest)
                                - (char *) dp_packet_l3(pkt_dest);
    }

    return pkt_dest;
}
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    const size_t batch_cnt = dp_packet_batch_size(batch);
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = batch_cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct dp_packet *pkts[PKT_ARRAY_SIZE];
    struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
    uint32_t cnt = batch_cnt;
    uint32_t dropped = 0;
    uint32_t tx_failure = 0;
    uint32_t mtu_drops = 0;
    uint32_t qos_drops = 0;
    uint32_t txcnt = 0;

    if (dev->type != DPDK_DEV_VHOST) {
        /* Check if QoS has been configured for this netdev. */
        cnt = netdev_dpdk_qos_run(dev, (struct rte_mbuf **) batch->packets,
                                  batch_cnt, false);
        qos_drops = batch_cnt - cnt;
    }

    for (uint32_t i = 0; i < cnt; i++) {
        struct dp_packet *packet = batch->packets[i];
        uint32_t size = dp_packet_size(packet);

        if (size > dev->max_packet_len
            && !(packet->mbuf.ol_flags & PKT_TX_TCP_SEG)) {
            VLOG_WARN_RL(&rl, "Too big size %u max_packet_len %d", size,
                         dev->max_packet_len);
            mtu_drops++;
            continue;
        }

        pkts[txcnt] = dpdk_copy_dp_packet_to_mbuf(dev->dpdk_mp->mp, packet);
        if (OVS_UNLIKELY(!pkts[txcnt])) {
            dropped = cnt - i;
            break;
        }

        txcnt++;
    }

    if (OVS_LIKELY(txcnt)) {
        if (dev->type == DPDK_DEV_VHOST) {
            __netdev_dpdk_vhost_send(netdev, qid, pkts, txcnt);
        } else {
            tx_failure += netdev_dpdk_eth_tx_burst(dev, qid,
                                                   (struct rte_mbuf **) pkts,
                                                   txcnt);
        }
    }

    dropped += qos_drops + mtu_drops + tx_failure;
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        sw_stats->tx_failure_drops += tx_failure;
        sw_stats->tx_mtu_exceeded_drops += mtu_drops;
        sw_stats->tx_qos_drops += qos_drops;
        rte_spinlock_unlock(&dev->stats_lock);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                       struct dp_packet_batch *batch,
                       bool concurrent_txq OVS_UNUSED)
{
    if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, true);
    } else {
        __netdev_dpdk_vhost_send(netdev, qid, batch->packets,
                                 dp_packet_batch_size(batch));
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet_batch *batch,
                   bool concurrent_txq)
{
    if (OVS_UNLIKELY(!(dev->flags & NETDEV_UP))) {
        dp_packet_delete_batch(batch, true);
        return;
    }

    if (OVS_UNLIKELY(concurrent_txq)) {
        qid = qid % dev->up.n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(batch->packets[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, true);
    } else {
        struct netdev_dpdk_sw_stats *sw_stats = dev->sw_stats;
        int dropped;
        int tx_failure, mtu_drops, qos_drops, hwol_drops;
        int batch_cnt = dp_packet_batch_size(batch);
        struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;

        hwol_drops = batch_cnt;
        if (userspace_tso_enabled()) {
            batch_cnt = netdev_dpdk_prep_hwol_batch(dev, pkts, batch_cnt);
        }
        hwol_drops -= batch_cnt;
        mtu_drops = batch_cnt;
        batch_cnt = netdev_dpdk_filter_packet_len(dev, pkts, batch_cnt);
        mtu_drops -= batch_cnt;
        qos_drops = batch_cnt;
        batch_cnt = netdev_dpdk_qos_run(dev, pkts, batch_cnt, true);
        qos_drops -= batch_cnt;

        tx_failure = netdev_dpdk_eth_tx_burst(dev, qid, pkts, batch_cnt);

        dropped = tx_failure + mtu_drops + qos_drops + hwol_drops;
        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            sw_stats->tx_failure_drops += tx_failure;
            sw_stats->tx_mtu_exceeded_drops += mtu_drops;
            sw_stats->tx_qos_drops += qos_drops;
            sw_stats->tx_invalid_hwol_drops += hwol_drops;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(concurrent_txq)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}

static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet_batch *batch, bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, batch, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_set_etheraddr__(struct netdev_dpdk *dev, const struct eth_addr mac)
    OVS_REQUIRES(dev->mutex)
{
    int err = 0;

    if (dev->type == DPDK_DEV_ETH) {
        struct rte_ether_addr ea;

        memcpy(ea.addr_bytes, mac.ea, ETH_ADDR_LEN);
        err = -rte_eth_dev_default_mac_addr_set(dev->port_id, &ea);
    }
    if (!err) {
        dev->hwaddr = mac;
    } else {
        VLOG_WARN("%s: Failed to set requested mac("ETH_ADDR_FMT"): %s",
                  netdev_get_name(&dev->up), ETH_ADDR_ARGS(mac),
                  rte_strerror(err));
    }

    return err;
}

static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        err = netdev_dpdk_set_etheraddr__(dev, mac);
        if (!err) {
            netdev_change_seq_changed(netdev);
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return err;
}

static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* XXX: Ensure that the overall frame length of the requested MTU does not
     * surpass the NETDEV_DPDK_MAX_PKT_LEN. DPDK device drivers differ in how
     * the L2 frame length is calculated for a given MTU when
     * rte_eth_dev_set_mtu(mtu) is called e.g. i40e driver includes 2 x vlan
     * headers, the em driver includes 1 x vlan header, the ixgbe driver does
     * not include vlan headers. As such we should use
     * MTU_TO_MAX_FRAME_LEN(mtu) which includes an additional 2 x vlan headers
     * (8 bytes) for comparison. This avoids a failure later with
     * rte_eth_dev_set_mtu(). This approach should be used until DPDK provides
     * a method to retrieve the upper bound MTU for a given device.
     */
    if (MTU_TO_MAX_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
        || mtu < RTE_ETHER_MIN_MTU) {
        VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu);
        return EINVAL;
    }

    ovs_mutex_lock(&dev->mutex);
    if (dev->requested_mtu != mtu) {
        dev->requested_mtu = mtu;
        netdev_request_reconfigure(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
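
/* Worked example (illustrative): MTU_TO_MAX_FRAME_LEN() adds
 * ETHER_HDR_MAX_LEN == 14 (Ethernet) + 4 (CRC) + 8 (two VLAN headers) == 26
 * bytes, so the largest MTU accepted above is 9728 - 26 == 9702, which
 * keeps the worst-case frame within NETDEV_DPDK_MAX_PKT_LEN (9728). */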
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);

static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets = dev->stats.rx_packets;
    stats->tx_packets = dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstat *xstats,
                           const struct rte_eth_xstat_name *names,
                           const unsigned int size)
{
/* DPDK XSTATS Counter names definition. */
#define DPDK_XSTATS \
    DPDK_XSTAT(multicast,               "rx_multicast_packets"            ) \
    DPDK_XSTAT(tx_multicast_packets,    "tx_multicast_packets"            ) \
    DPDK_XSTAT(rx_broadcast_packets,    "rx_broadcast_packets"            ) \
    DPDK_XSTAT(tx_broadcast_packets,    "tx_broadcast_packets"            ) \
    DPDK_XSTAT(rx_undersized_errors,    "rx_undersized_errors"            ) \
    DPDK_XSTAT(rx_oversize_errors,      "rx_oversize_errors"              ) \
    DPDK_XSTAT(rx_fragmented_errors,    "rx_fragmented_errors"            ) \
    DPDK_XSTAT(rx_jabber_errors,        "rx_jabber_errors"                ) \
    DPDK_XSTAT(rx_1_to_64_packets,      "rx_size_64_packets"              ) \
    DPDK_XSTAT(rx_65_to_127_packets,    "rx_size_65_to_127_packets"       ) \
    DPDK_XSTAT(rx_128_to_255_packets,   "rx_size_128_to_255_packets"      ) \
    DPDK_XSTAT(rx_256_to_511_packets,   "rx_size_256_to_511_packets"      ) \
    DPDK_XSTAT(rx_512_to_1023_packets,  "rx_size_512_to_1023_packets"     ) \
    DPDK_XSTAT(rx_1024_to_1522_packets, "rx_size_1024_to_1522_packets"    ) \
    DPDK_XSTAT(rx_1523_to_max_packets,  "rx_size_1523_to_max_packets"     ) \
    DPDK_XSTAT(tx_1_to_64_packets,      "tx_size_64_packets"              ) \
    DPDK_XSTAT(tx_65_to_127_packets,    "tx_size_65_to_127_packets"       ) \
    DPDK_XSTAT(tx_128_to_255_packets,   "tx_size_128_to_255_packets"      ) \
    DPDK_XSTAT(tx_256_to_511_packets,   "tx_size_256_to_511_packets"      ) \
    DPDK_XSTAT(tx_512_to_1023_packets,  "tx_size_512_to_1023_packets"     ) \
    DPDK_XSTAT(tx_1024_to_1522_packets, "tx_size_1024_to_1522_packets"    ) \
    DPDK_XSTAT(tx_1523_to_max_packets,  "tx_size_1523_to_max_packets"     )

    for (unsigned int i = 0; i < size; i++) {
#define DPDK_XSTAT(MEMBER, NAME)                \
        if (strcmp(NAME, names[i].name) == 0) { \
            stats->MEMBER = xstats[i].value;    \
            continue;                           \
        }
        DPDK_XSTATS;
    }
#undef DPDK_XSTAT
#undef DPDK_XSTATS
}
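
/* X-macro expansion example (illustrative): for the first DPDK_XSTATS entry
 * the loop body above expands to
 *
 *     if (strcmp("rx_multicast_packets", names[i].name) == 0) {
 *         stats->multicast = xstats[i].value;
 *         continue;
 *     }
 *
 * and similarly for every other (MEMBER, NAME) pair in the list. */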
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstat *rte_xstats = NULL;
    struct rte_eth_xstat_name *rte_xstats_names = NULL;
    int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: "DPDK_PORT_ID_FMT,
                 dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    /* Get length of statistics */
    rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
    if (rte_xstats_len < 0) {
        VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT,
                  dev->port_id);
        goto out;
    }

    /* Reserve memory for xstats names and values */
    rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
    rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);

    /* Retrieve xstats names */
    rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
                                                  rte_xstats_names,
                                                  rte_xstats_len);
    if (rte_xstats_new_len != rte_xstats_len) {
        VLOG_WARN("Cannot get XSTATS names for port: "DPDK_PORT_ID_FMT,
                  dev->port_id);
        goto out;
    }

    /* Retrieve xstats values */
    memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
    rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                        rte_xstats_len);
    if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
        netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
                                   rte_xstats_ret);
    } else {
        VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT,
                  dev->port_id);
    }

out:
    free(rte_xstats);
    free(rte_xstats_names);

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    stats->rx_errors = rte_stats.ierrors;
    stats->tx_errors = rte_stats.oerrors;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_custom_stats(const struct netdev *netdev,
                             struct netdev_custom_stats *custom_stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int rte_xstats_ret, sw_stats_size;

    netdev_dpdk_get_sw_custom_stats(netdev, custom_stats);

    ovs_mutex_lock(&dev->mutex);

    if (netdev_dpdk_configure_xstats(dev)) {
        uint64_t *values = xcalloc(dev->rte_xstats_ids_size,
                                   sizeof(uint64_t));
        int i;

        rte_xstats_ret =
                rte_eth_xstats_get_by_id(dev->port_id, dev->rte_xstats_ids,
                                         values, dev->rte_xstats_ids_size);

        if (rte_xstats_ret > 0 &&
            rte_xstats_ret <= dev->rte_xstats_ids_size) {

            sw_stats_size = custom_stats->size;
            custom_stats->size += rte_xstats_ret;
            custom_stats->counters = xrealloc(custom_stats->counters,
                                              custom_stats->size *
                                              sizeof *custom_stats->counters);

            for (i = 0; i < rte_xstats_ret; i++) {
                ovs_strlcpy(custom_stats->counters[sw_stats_size + i].name,
                            netdev_dpdk_get_xstat_name(dev,
                                                       dev->rte_xstats_ids[i]),
                            NETDEV_CUSTOM_STATS_NAME_SIZE);
                custom_stats->counters[sw_stats_size + i].value = values[i];
            }
        } else {
            VLOG_WARN("Cannot get XSTATS values for port: "DPDK_PORT_ID_FMT,
                      dev->port_id);
            /* Let's clear statistics cache, so it will be
             * reconfigured */
            netdev_dpdk_clear_xstats(dev);
        }

        free(values);
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_sw_custom_stats(const struct netdev *netdev,
                                struct netdev_custom_stats *custom_stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int i, n;

#define SW_CSTATS                    \
    SW_CSTAT(tx_retries)             \
    SW_CSTAT(tx_failure_drops)       \
    SW_CSTAT(tx_mtu_exceeded_drops)  \
    SW_CSTAT(tx_qos_drops)           \
    SW_CSTAT(rx_qos_drops)           \
    SW_CSTAT(tx_invalid_hwol_drops)

#define SW_CSTAT(NAME) + 1
    custom_stats->size = SW_CSTATS;
#undef SW_CSTAT

    custom_stats->counters = xcalloc(custom_stats->size,
                                     sizeof *custom_stats->counters);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    i = 0;
#define SW_CSTAT(NAME) \
    custom_stats->counters[i++].value = dev->sw_stats->NAME;
    SW_CSTATS;
#undef SW_CSTAT
    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    i = 0;
    n = 0;
#define SW_CSTAT(NAME)                                                     \
    if (custom_stats->counters[i].value != UINT64_MAX) {                   \
        ovs_strlcpy(custom_stats->counters[n].name,                        \
                    "ovs_"#NAME, NETDEV_CUSTOM_STATS_NAME_SIZE);           \
        custom_stats->counters[n].value = custom_stats->counters[i].value; \
        n++;                                                               \
    }                                                                      \
    i++;
    SW_CSTATS;
#undef SW_CSTAT
#undef SW_CSTATS

    custom_stats->size = n;
    return 0;
}
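
/* X-macro note (illustrative): SW_CSTATS is expanded three times above with
 * different SW_CSTAT() definitions: once as '+ 1' per entry to size the
 * counter array, once to copy each dev->sw_stats member under stats_lock,
 * and once to name and compact the counters that hold a valid
 * (!= UINT64_MAX) value. */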
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised,
                         enum netdev_features *supported,
                         enum netdev_features *peer)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;
    uint32_t feature = 0;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    /* Match against OpenFlow defined link speed values. */
    if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        switch (link.link_speed) {
        case ETH_SPEED_NUM_10M:
            feature |= NETDEV_F_10MB_FD;
            break;
        case ETH_SPEED_NUM_100M:
            feature |= NETDEV_F_100MB_FD;
            break;
        case ETH_SPEED_NUM_1G:
            feature |= NETDEV_F_1GB_FD;
            break;
        case ETH_SPEED_NUM_10G:
            feature |= NETDEV_F_10GB_FD;
            break;
        case ETH_SPEED_NUM_40G:
            feature |= NETDEV_F_40GB_FD;
            break;
        case ETH_SPEED_NUM_100G:
            feature |= NETDEV_F_100GB_FD;
            break;
        default:
            feature |= NETDEV_F_OTHER;
        }
    } else if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        switch (link.link_speed) {
        case ETH_SPEED_NUM_10M:
            feature |= NETDEV_F_10MB_HD;
            break;
        case ETH_SPEED_NUM_100M:
            feature |= NETDEV_F_100MB_HD;
            break;
        case ETH_SPEED_NUM_1G:
            feature |= NETDEV_F_1GB_HD;
            break;
        default:
            feature |= NETDEV_F_OTHER;
        }
    }

    if (link.link_autoneg) {
        feature |= NETDEV_F_AUTONEG;
    }

    *current = feature;
    *advertised = *supported = *peer = 0;

    return 0;
}
static struct ingress_policer *
netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
{
    struct ingress_policer *policer = NULL;
    uint64_t rate_bytes;
    uint64_t burst_bytes;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    rte_spinlock_init(&policer->policer_lock);

    /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
    rate_bytes = rate * 1000ULL / 8;
    burst_bytes = burst * 1000ULL / 8;

    policer->app_srtcm_params.cir = rate_bytes;
    policer->app_srtcm_params.cbs = burst_bytes;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_profile_config(&policer->in_prof,
                                         &policer->app_srtcm_params);
    if (!err) {
        err = rte_meter_srtcm_config(&policer->in_policer,
                                     &policer->in_prof);
    }
    if (err) {
        VLOG_ERR("Could not create rte meter for ingress policer");
        free(policer);
        return NULL;
    }

    return policer;
}
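
/* Worked example (illustrative): a policer configured at rate=10000 kbps
 * with burst=8000 kb yields cir = 10000 * 1000 / 8 == 1,250,000 bytes/s and
 * cbs = 8000 * 1000 / 8 == 1,000,000 bytes. */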
static int
netdev_dpdk_set_policing(struct netdev *netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;

    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value.
     */
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
                     : policer_burst);

    ovs_mutex_lock(&dev->mutex);

    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);

    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    }

    /* Destroy any existing ingress policer for the device if one exists */
    if (policer) {
        ovsrcu_postpone(free, policer);
    }

    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    } else {
        policer = NULL;
    }
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
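
/* Defaulting examples (illustrative): policer_rate=0 forces policer_burst
 * to 0 (policing disabled); policer_rate=1000 with policer_burst=0 defaults
 * the burst to 8000 kb; any explicit non-zero burst is kept as given. */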
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    /* Calculate hash from the netdev name. Ensure that ifindex is a 24-bit
     * positive integer to meet RFC 2863 recommendations.
     */
    int ifindex = hash_string(netdev->name, 0) % 0xfffffe + 1;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
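
/* Range note (illustrative): 'hash % 0xfffffe + 1' yields a value in
 * [1, 0xfffffe], i.e. a strictly positive number that fits in 24 bits as
 * RFC 2863 recommends; e.g. a hash of 0 maps to ifindex 1. */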
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}

static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {

        if ((dev->flags ^ *old_flagsp) & NETDEV_UP) {
            int err;

            if (dev->flags & NETDEV_UP) {
                err = rte_eth_dev_set_link_up(dev->port_id);
            } else {
                err = rte_eth_dev_set_link_down(dev->port_id);
            }
            if (err == -ENOTSUP) {
                VLOG_INFO("Interface %s does not support link state "
                          "configuration", netdev_get_name(&dev->up));
            } else if (err < 0) {
                VLOG_ERR("Interface %s link change error: %s",
                         netdev_get_name(&dev->up), rte_strerror(-err));
                dev->flags = *old_flagsp;
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        netdev_change_seq_changed(&dev->up);
    } else {
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
         * update. */

        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(dev)) {
            netdev_change_seq_changed(&dev->up);

            /* Clear statistics if device is getting up. */
            if (NETDEV_UP & on) {
                rte_spinlock_lock(&dev->stats_lock);
                memset(&dev->stats, 0, sizeof dev->stats);
                rte_spinlock_unlock(&dev->stats_lock);
            }
        }
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_vhost_user_get_status(const struct netdev *netdev,
                                  struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    bool client_mode = dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT;
    smap_add_format(args, "mode", "%s", client_mode ? "client" : "server");

    int vid = netdev_dpdk_get_vid(dev);
    if (vid < 0) {
        smap_add_format(args, "status", "disconnected");
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    } else {
        smap_add_format(args, "status", "connected");
    }

    char socket_name[PATH_MAX];
    if (!rte_vhost_get_ifname(vid, socket_name, PATH_MAX)) {
        smap_add_format(args, "socket", "%s", socket_name);
    }

    uint64_t features;
    if (!rte_vhost_get_negotiated_features(vid, &features)) {
        smap_add_format(args, "features", "0x%016"PRIx64, features);
    }

    uint16_t mtu;
    if (!rte_vhost_get_mtu(vid, &mtu)) {
        smap_add_format(args, "mtu", "%d", mtu);
    }

    int numa = rte_vhost_get_numa_node(vid);
    if (numa >= 0) {
        smap_add_format(args, "numa", "%d", numa);
    }

    uint16_t vring_num = rte_vhost_get_vring_num(vid);
    if (vring_num) {
        smap_add_format(args, "num_of_vrings", "%d", vring_num);
    }

    for (int i = 0; i < vring_num; i++) {
        struct rte_vhost_vring vring;

        rte_vhost_get_vhost_vring(vid, i, &vring);
        smap_add_nocopy(args, xasprintf("vring_%d_size", i),
                        xasprintf("%d", vring.size));
    }

    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
/*
 * Convert a given uint32_t link speed defined in DPDK to a string
 * equivalent.
 */
static const char *
netdev_dpdk_link_speed_to_str__(uint32_t link_speed)
{
    switch (link_speed) {
    case ETH_SPEED_NUM_10M:    return "10Mbps";
    case ETH_SPEED_NUM_100M:   return "100Mbps";
    case ETH_SPEED_NUM_1G:     return "1Gbps";
    case ETH_SPEED_NUM_2_5G:   return "2.5Gbps";
    case ETH_SPEED_NUM_5G:     return "5Gbps";
    case ETH_SPEED_NUM_10G:    return "10Gbps";
    case ETH_SPEED_NUM_20G:    return "20Gbps";
    case ETH_SPEED_NUM_25G:    return "25Gbps";
    case ETH_SPEED_NUM_40G:    return "40Gbps";
    case ETH_SPEED_NUM_50G:    return "50Gbps";
    case ETH_SPEED_NUM_56G:    return "56Gbps";
    case ETH_SPEED_NUM_100G:   return "100Gbps";
    default:                   return "Not Defined";
    }
}
static int
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;
    uint32_t link_speed;
    uint32_t dev_flags;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    link_speed = dev->link.link_speed;
    dev_flags = *dev_info.dev_flags;
    ovs_mutex_unlock(&dev->mutex);
    const struct rte_bus *bus;
    const struct rte_pci_device *pci_dev;
    uint16_t vendor_id = PCI_ANY_ID;
    uint16_t device_id = PCI_ANY_ID;
    bus = rte_bus_find_by_device(dev_info.device);
    if (bus && !strcmp(bus->name, "pci")) {
        pci_dev = RTE_DEV_TO_PCI(dev_info.device);
        if (pci_dev) {
            vendor_id = pci_dev->id.vendor_id;
            device_id = pci_dev->id.device_id;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    smap_add_format(args, "port_no", DPDK_PORT_ID_FMT, dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    /* Querying the DPDK library for iftype may be done in future, pending
     * support; cf. RFC 3635 Section 3.2.4. */
    enum { IF_TYPE_ETHERNETCSMACD = 6 };

    smap_add_format(args, "if_type", "%"PRIu32, IF_TYPE_ETHERNETCSMACD);
    smap_add_format(args, "if_descr", "%s %s", rte_version(),
                    dev_info.driver_name);
    smap_add_format(args, "pci-vendor_id", "0x%x", vendor_id);
    smap_add_format(args, "pci-device_id", "0x%x", device_id);

    /* Not all link speeds are defined in the OpenFlow specs e.g. 25 Gbps.
     * In that case the speed will not be reported as part of the usual
     * call to get_features(). Get the link speed of the device and add it
     * to the device status in an easy to read string format.
     */
    smap_add(args, "link_speed",
             netdev_dpdk_link_speed_to_str__(link_speed));

    if (dev_flags & RTE_ETH_DEV_REPRESENTOR) {
        smap_add_format(args, "dpdk-vf-mac", ETH_ADDR_FMT,
                        ETH_ADDR_ARGS(dev->hwaddr));
    }

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);

        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dev->mutex);
            netdev_dpdk_set_admin_state__(dev, up);
            ovs_mutex_unlock(&dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *dev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            netdev_dpdk_set_admin_state__(dev, up);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
static void
netdev_dpdk_detach(struct unixctl_conn *conn, int argc OVS_UNUSED,
                   const char *argv[], void *aux OVS_UNUSED)
{
    char *response;
    dpdk_port_t port_id;
    struct netdev_dpdk *dev;
    struct rte_device *rte_dev;
    struct ds used_interfaces = DS_EMPTY_INITIALIZER;
    bool used = false;

    ovs_mutex_lock(&dpdk_mutex);

    port_id = netdev_dpdk_get_port_by_devargs(argv[1]);
    if (!rte_eth_dev_is_valid_port(port_id)) {
        response = xasprintf("Device '%s' not found in DPDK", argv[1]);
        goto error;
    }

    rte_dev = rte_eth_devices[port_id].device;
    ds_put_format(&used_interfaces,
                  "Device '%s' is being used by the following interfaces:",
                  argv[1]);

    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        /* FIXME: avoid direct access to DPDK array rte_eth_devices. */
        if (rte_eth_devices[dev->port_id].device == rte_dev
            && rte_eth_devices[dev->port_id].state != RTE_ETH_DEV_UNUSED) {
            used = true;
            ds_put_format(&used_interfaces, " %s",
                          netdev_get_name(&dev->up));
        }
    }

    if (used) {
        ds_put_cstr(&used_interfaces, ". Remove them before detaching.");
        response = ds_steal_cstr(&used_interfaces);
        ds_destroy(&used_interfaces);
        goto error;
    }
    ds_destroy(&used_interfaces);

    rte_eth_dev_close(port_id);
    if (rte_dev_remove(rte_dev) < 0) {
        response = xasprintf("Device '%s' can not be detached", argv[1]);
        goto error;
    }

    response = xasprintf("All devices shared with device '%s' "
                         "have been detached", argv[1]);

    ovs_mutex_unlock(&dpdk_mutex);
    unixctl_command_reply(conn, response);
    free(response);
    return;

error:
    ovs_mutex_unlock(&dpdk_mutex);
    unixctl_command_reply_error(conn, response);
    free(response);
}
static void
netdev_dpdk_get_mempool_info(struct unixctl_conn *conn,
                             int argc, const char *argv[],
                             void *aux OVS_UNUSED)
{
    size_t size;
    FILE *stream;
    char *response = NULL;
    struct netdev *netdev = NULL;

    if (argc == 2) {
        netdev = netdev_from_name(argv[1]);
        if (!netdev || !is_dpdk_class(netdev->netdev_class)) {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            goto out;
        }
    }

    stream = open_memstream(&response, &size);
    if (!stream) {
        response = xasprintf("Unable to open memstream: %s.",
                             ovs_strerror(errno));
        unixctl_command_reply_error(conn, response);
        goto out;
    }

    if (netdev) {
        struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

        ovs_mutex_lock(&dev->mutex);
        ovs_mutex_lock(&dpdk_mp_mutex);

        rte_mempool_dump(stream, dev->dpdk_mp->mp);

        ovs_mutex_unlock(&dpdk_mp_mutex);
        ovs_mutex_unlock(&dev->mutex);
    } else {
        ovs_mutex_lock(&dpdk_mp_mutex);
        rte_mempool_list_dump(stream);
        ovs_mutex_unlock(&dpdk_mp_mutex);
    }

    fclose(stream);

    unixctl_command_reply(conn, response);
out:
    free(response);
    netdev_close(netdev);
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(int vid)
{
    uint32_t i;

    for (i = 0; i < rte_vhost_get_vring_num(vid); i++) {
        rte_vhost_enable_guest_notification(vid, i, 0);
    }
}
/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->up.n_txq;

    enabled_queues = xcalloc(total_txqs, sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    if (VLOG_IS_DBG_ENABLED()) {
        struct ds mapping = DS_EMPTY_INITIALIZER;

        ds_put_format(&mapping, "TX queue mapping for port '%s':\n",
                      netdev_get_name(&dev->up));
        for (i = 0; i < total_txqs; i++) {
            ds_put_format(&mapping, "%2d --> %2d\n", i, dev->tx_q[i].map);
        }

        VLOG_DBG("%s", ds_cstr(&mapping));
        ds_destroy(&mapping);
    }

    free(enabled_queues);
}
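
/* Mapping example (illustrative): with 4 tx queues of which only queues 0
 * and 2 are enabled, the loops above leave 0->0 and 2->2 in place and
 * round-robin the disabled queues over the enabled ones, giving 1->0 and
 * 3->2. */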
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int newnode = 0;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (nullable_string_is_equal(ifname, dev->vhost_id)) {
            uint32_t qp_num = rte_vhost_get_vring_num(vid) / VIRTIO_QNUM;

            /* Get NUMA information */
            newnode = rte_vhost_get_numa_node(vid);
            if (newnode == -1) {
#ifdef VHOST_NUMA
                VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
                          ifname);
#endif
                newnode = dev->socket_id;
            }

            if (dev->requested_n_txq < qp_num
                || dev->requested_n_rxq < qp_num
                || dev->requested_socket_id != newnode) {
                dev->requested_socket_id = newnode;
                dev->requested_n_rxq = qp_num;
                dev->requested_n_txq = qp_num;
                netdev_request_reconfigure(&dev->up);
            } else {
                /* Reconfiguration not required. */
                dev->vhost_reconfigured = true;
            }

            ovsrcu_index_set(&dev->vid, vid);
            exists = true;

            /* Disable notifications. */
            set_irq_status(vid);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' has been added on numa node %i",
              ifname, newnode);

    return 0;
}
/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->up.n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}

/*
 * Remove a virtio-net device from the specific vhost port. Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_vid(dev) == vid) {

            ovs_mutex_lock(&dev->mutex);
            dev->vhost_reconfigured = false;
            ovsrcu_index_set(&dev->vid, -1);
            memset(dev->vhost_rxq_enabled, 0,
                   dev->up.n_rxq * sizeof *dev->vhost_rxq_enabled);
            netdev_dpdk_txq_map_clear(dev);

            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            exists = true;
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' has been removed", ifname);
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
    }
}
static int
vring_state_changed(int vid, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;
    bool is_rx = (queue_id % VIRTIO_QNUM) == VIRTIO_TXQ;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (nullable_string_is_equal(ifname, dev->vhost_id)) {
            if (is_rx) {
                bool old_state = dev->vhost_rxq_enabled[qid];

                dev->vhost_rxq_enabled[qid] = enable != 0;
                if (old_state != dev->vhost_rxq_enabled[qid]) {
                    netdev_change_seq_changed(&dev->up);
                }
            } else {
                if (enable) {
                    dev->tx_q[qid].map = qid;
                } else {
                    dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
                }
                netdev_dpdk_remap_txqs(dev);
            }
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( %s_qid %d ) of vhost device '%s' "
                  "changed to \'%s\'", queue_id, is_rx == true ? "rx" : "tx",
                  qid, ifname, (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
        return -1;
    }

    return 0;
}
static void
destroy_connection(int vid)
{
    struct netdev_dpdk *dev;
    char ifname[IF_NAME_SZ];
    bool exists = false;

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (nullable_string_is_equal(ifname, dev->vhost_id)) {
            uint32_t qp_num = NR_QUEUE;

            if (netdev_dpdk_get_vid(dev) >= 0) {
                VLOG_ERR("Connection on socket '%s' destroyed while vhost "
                         "device still attached.", dev->vhost_id);
            }

            /* Restore the number of queue pairs to default. */
            if (dev->requested_n_txq != qp_num
                || dev->requested_n_rxq != qp_num) {
                dev->requested_n_rxq = qp_num;
                dev->requested_n_txq = qp_num;
                netdev_request_reconfigure(&dev->up);
            }
            ovs_mutex_unlock(&dev->mutex);
            exists = true;
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("vHost Device '%s' connection has been destroyed", ifname);
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
    }
}
static void
vhost_guest_notified(int vid OVS_UNUSED)
{
    COVERAGE_INC(vhost_notification);
}

/*
 * Retrieve the DPDK virtio device ID (vid) associated with a vhostuser
 * or vhostuserclient netdev.
 *
 * Returns a value greater or equal to zero for a valid vid or '-1' if
 * there is no valid vid associated. A vid of '-1' must not be used in
 * rte_vhost_ API calls.
 *
 * Once obtained and validated, a vid can be used by a PMD for multiple
 * subsequent rte_vhost API calls until the PMD quiesces. A PMD should
 * not fetch the vid again for each of a series of API calls.
 */
int
netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
{
    return ovsrcu_index_get(&dev->vid);
}
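
/* Usage sketch (illustrative), following the contract described above:
 *
 *     int vid = netdev_dpdk_get_vid(dev);
 *
 *     if (vid >= 0) {
 *         ... use 'vid' for a series of rte_vhost_*() calls without
 *         re-fetching it until the PMD thread next quiesces ...
 *     }
 */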
struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
}
static int
netdev_dpdk_class_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    /* This function can be called for different classes.  The initialization
     * needs to be done only once */
    if (ovsthread_once_start(&once)) {
        int ret;

        ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
        unixctl_command_register("netdev-dpdk/set-admin-state",
                                 "[netdev] up|down", 1, 2,
                                 netdev_dpdk_set_admin_state, NULL);

        unixctl_command_register("netdev-dpdk/detach",
                                 "pci address of device", 1, 1,
                                 netdev_dpdk_detach, NULL);

        unixctl_command_register("netdev-dpdk/get-mempool-info",
                                 "[netdev]", 0, 1,
                                 netdev_dpdk_get_mempool_info, NULL);

        ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
                                            RTE_ETH_EVENT_INTR_RESET,
                                            dpdk_eth_event_callback, NULL);
        if (ret != 0) {
            VLOG_ERR("Ethernet device callback register error: %s",
                     rte_strerror(-ret));
        }

        ovsthread_once_done(&once);
    }

    return 0;
}
4261 * Initialize QoS configuration operations.
4264 qos_conf_init(struct qos_conf
*conf
, const struct dpdk_qos_ops
*ops
)
4267 rte_spinlock_init(&conf
->lock
);
/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations qos_name to name.  Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match was found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }
    return NULL;
}
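/* For reference, 'qos_confs' (defined earlier in this file) is the
 * NULL-terminated array this lookup walks; adding a new QoS implementation
 * amounts to listing its ops structure there.  Sketch:
 *
 *     static const struct dpdk_qos_ops *const qos_confs[] = {
 *         &egress_policer_ops,
 *         &trtcm_policer_ops,
 *         NULL
 *     };
 */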
static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }

    return 0;
}
static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf) {
        *typep = qos_conf->ops->qos_name;
        error = (qos_conf->ops->qos_get
                 ? qos_conf->ops->qos_get(qos_conf, details) : 0);
    } else {
        /* No QoS configuration set, return an empty string. */
        *typep = "";
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_set_qos(struct netdev *netdev, const char *type,
                    const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    struct qos_conf *qos_conf, *new_qos_conf = NULL;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);

    new_ops = qos_lookup_name(type);

    if (!new_ops || !new_ops->qos_construct) {
        new_qos_conf = NULL;
        if (type && type[0]) {
            error = EOPNOTSUPP;
        }
    } else if (qos_conf && qos_conf->ops == new_ops
               && qos_conf->ops->qos_is_equal(qos_conf, details)) {
        new_qos_conf = qos_conf;
    } else {
        error = new_ops->qos_construct(details, &new_qos_conf);
    }

    if (error) {
        VLOG_ERR("Failed to set QoS type %s on port %s: %s",
                 type, netdev->name, rte_strerror(error));
    }

    if (new_qos_conf != qos_conf) {
        ovsrcu_set(&dev->qos_conf, new_qos_conf);
        if (qos_conf) {
            ovsrcu_postpone(qos_conf->ops->qos_destruct, qos_conf);
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
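/* Illustrative CLI path into netdev_dpdk_set_qos(); the port name and rate
 * values are examples only.  The QoS record's "type" column selects the
 * dpdk_qos_ops entry via qos_lookup_name(), and its other-config map arrives
 * here as 'details':
 *
 *     $ ovs-vsctl set port vhost-user0 qos=@newqos -- \
 *         --id=@newqos create qos type=egress-policer \
 *         other-config:cir=46000 other-config:cbs=2048
 */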
static int
netdev_dpdk_get_queue(const struct netdev *netdev, uint32_t queue_id,
                      struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (!qos_conf || !qos_conf->ops || !qos_conf->ops->qos_queue_get) {
        error = EOPNOTSUPP;
    } else {
        error = qos_conf->ops->qos_queue_get(details, queue_id, qos_conf);
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}

static int
netdev_dpdk_set_queue(struct netdev *netdev, uint32_t queue_id,
                      const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (!qos_conf || !qos_conf->ops || !qos_conf->ops->qos_queue_construct) {
        error = EOPNOTSUPP;
    } else {
        error = qos_conf->ops->qos_queue_construct(details, queue_id,
                                                   qos_conf);
    }

    if (error && error != EOPNOTSUPP) {
        VLOG_ERR("Failed to set QoS queue %d on port %s: %s",
                 queue_id, netdev_get_name(netdev), rte_strerror(error));
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_delete_queue(struct netdev *netdev, uint32_t queue_id)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_destruct) {
        qos_conf->ops->qos_queue_destruct(qos_conf, queue_id);
    } else {
        error = EOPNOTSUPP;
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}

static int
netdev_dpdk_get_queue_stats(const struct netdev *netdev, uint32_t queue_id,
                            struct netdev_queue_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_get_stats) {
        qos_conf->ops->qos_queue_get_stats(qos_conf, queue_id, stats);
    } else {
        error = EOPNOTSUPP;
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_queue_dump_start(const struct netdev *netdev, void **statep)
{
    int error = 0;
    struct qos_conf *qos_conf;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf && qos_conf->ops
        && qos_conf->ops->qos_queue_dump_state_init) {
        struct netdev_dpdk_queue_state *state;

        *statep = state = xmalloc(sizeof *state);
        error = qos_conf->ops->qos_queue_dump_state_init(qos_conf, state);
    } else {
        error = EOPNOTSUPP;
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}

static int
netdev_dpdk_queue_dump_next(const struct netdev *netdev, void *state_,
                            uint32_t *queue_idp, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct netdev_dpdk_queue_state *state = state_;
    struct qos_conf *qos_conf;
    int error = EOF;

    ovs_mutex_lock(&dev->mutex);

    while (state->cur_queue < state->n_queues) {
        uint32_t queue_id = state->queues[state->cur_queue++];

        qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
        if (qos_conf && qos_conf->ops && qos_conf->ops->qos_queue_get) {
            *queue_idp = queue_id;
            error = qos_conf->ops->qos_queue_get(details, queue_id, qos_conf);
            break;
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}

static int
netdev_dpdk_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
                            void *state_)
{
    struct netdev_dpdk_queue_state *state = state_;

    free(state->queues);
    free(state);
    return 0;
}
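/* Illustrative sketch (not compiled) of the three-call dump protocol
 * implemented above.  Callers normally reach these through the generic
 * netdev layer rather than invoking them directly:
 *
 *     void *state;
 *     uint32_t queue_id;
 *     struct smap details = SMAP_INITIALIZER(&details);
 *
 *     if (!netdev_dpdk_queue_dump_start(netdev, &state)) {
 *         while (!netdev_dpdk_queue_dump_next(netdev, state,
 *                                             &queue_id, &details)) {
 *             ... consume queue_id and details; dump_next returns EOF
 *             once the recorded queue list is exhausted ...
 *         }
 *         netdev_dpdk_queue_dump_done(netdev, state);
 *     }
 *     smap_destroy(&details);
 */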
/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
    struct rte_meter_srtcm_profile egress_prof;
};

static void
egress_policer_details_to_param(const struct smap *details,
                                struct rte_meter_srtcm_params *params)
{
    memset(params, 0, sizeof *params);
    params->cir = smap_get_ullong(details, "cir", 0);
    params->cbs = smap_get_ullong(details, "cbs", 0);
}
static int
egress_policer_qos_construct(const struct smap *details,
                             struct qos_conf **conf)
{
    struct egress_policer *policer;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    egress_policer_details_to_param(details, &policer->app_srtcm_params);
    err = rte_meter_srtcm_profile_config(&policer->egress_prof,
                                         &policer->app_srtcm_params);
    if (!err) {
        err = rte_meter_srtcm_config(&policer->egress_meter,
                                     &policer->egress_prof);
    }

    if (!err) {
        *conf = &policer->qos_conf;
    } else {
        VLOG_ERR("Could not create rte meter for egress policer");
        free(policer);
        *conf = NULL;
        err = -err;
    }

    return err;
}
static void
egress_policer_qos_destruct(struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}

static int
egress_policer_qos_get(const struct qos_conf *conf, struct smap *details)
{
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);

    smap_add_format(details, "cir", "%"PRIu64, policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%"PRIu64, policer->app_srtcm_params.cbs);
    return 0;
}

static bool
egress_policer_qos_is_equal(const struct qos_conf *conf,
                            const struct smap *details)
{
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);
    struct rte_meter_srtcm_params params;

    egress_policer_details_to_param(details, &params);

    return !memcmp(&params, &policer->app_srtcm_params, sizeof params);
}
static int
egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt,
                   bool should_steal)
{
    int cnt = 0;
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);

    cnt = srtcm_policer_run_single_packet(&policer->egress_meter,
                                          &policer->egress_prof, pkts,
                                          pkt_cnt, should_steal);

    return cnt;
}

static const struct dpdk_qos_ops egress_policer_ops = {
    .qos_name = "egress-policer",
    .qos_construct = egress_policer_qos_construct,
    .qos_destruct = egress_policer_qos_destruct,
    .qos_get = egress_policer_qos_get,
    .qos_is_equal = egress_policer_qos_is_equal,
    .qos_run = egress_policer_run
};
/* trtcm-policer details */

struct trtcm_policer {
    struct qos_conf qos_conf;
    struct rte_meter_trtcm_rfc4115_params meter_params;
    struct rte_meter_trtcm_rfc4115_profile meter_profile;
    struct rte_meter_trtcm_rfc4115 meter;
    struct netdev_queue_stats stats;
    struct hmap queues;
};

struct trtcm_policer_queue {
    struct hmap_node hmap_node;
    uint32_t queue_id;
    struct rte_meter_trtcm_rfc4115_params meter_params;
    struct rte_meter_trtcm_rfc4115_profile meter_profile;
    struct rte_meter_trtcm_rfc4115 meter;
    struct netdev_queue_stats stats;
};
static void
trtcm_policer_details_to_param(const struct smap *details,
                               struct rte_meter_trtcm_rfc4115_params *params)
{
    memset(params, 0, sizeof *params);
    params->cir = smap_get_ullong(details, "cir", 0);
    params->eir = smap_get_ullong(details, "eir", 0);
    params->cbs = smap_get_ullong(details, "cbs", 0);
    params->ebs = smap_get_ullong(details, "ebs", 0);
}

static void
trtcm_policer_param_to_detail(
    const struct rte_meter_trtcm_rfc4115_params *params,
    struct smap *details)
{
    smap_add_format(details, "cir", "%"PRIu64, params->cir);
    smap_add_format(details, "eir", "%"PRIu64, params->eir);
    smap_add_format(details, "cbs", "%"PRIu64, params->cbs);
    smap_add_format(details, "ebs", "%"PRIu64, params->ebs);
}
static int
trtcm_policer_qos_construct(const struct smap *details,
                            struct qos_conf **conf)
{
    struct trtcm_policer *policer;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &trtcm_policer_ops);
    trtcm_policer_details_to_param(details, &policer->meter_params);
    err = rte_meter_trtcm_rfc4115_profile_config(&policer->meter_profile,
                                                 &policer->meter_params);
    if (!err) {
        err = rte_meter_trtcm_rfc4115_config(&policer->meter,
                                             &policer->meter_profile);
    }

    if (!err) {
        *conf = &policer->qos_conf;
        memset(&policer->stats, 0, sizeof policer->stats);
        hmap_init(&policer->queues);
    } else {
        free(policer);
        *conf = NULL;
        err = -err;
    }

    return err;
}
static void
trtcm_policer_qos_destruct(struct qos_conf *conf)
{
    struct trtcm_policer_queue *queue, *next_queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    HMAP_FOR_EACH_SAFE (queue, next_queue, hmap_node, &policer->queues) {
        hmap_remove(&policer->queues, &queue->hmap_node);
        free(queue);
    }

    hmap_destroy(&policer->queues);
    free(policer);
}

static int
trtcm_policer_qos_get(const struct qos_conf *conf, struct smap *details)
{
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    trtcm_policer_param_to_detail(&policer->meter_params, details);
    return 0;
}

static bool
trtcm_policer_qos_is_equal(const struct qos_conf *conf,
                           const struct smap *details)
{
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);
    struct rte_meter_trtcm_rfc4115_params params;

    trtcm_policer_details_to_param(details, &params);

    return !memcmp(&params, &policer->meter_params, sizeof params);
}
static struct trtcm_policer_queue *
trtcm_policer_qos_find_queue(struct trtcm_policer *policer, uint32_t queue_id)
{
    struct trtcm_policer_queue *queue;

    HMAP_FOR_EACH_WITH_HASH (queue, hmap_node, hash_2words(queue_id, 0),
                             &policer->queues) {
        if (queue->queue_id == queue_id) {
            return queue;
        }
    }
    return NULL;
}
static bool
trtcm_policer_run_single_packet(struct trtcm_policer *policer,
                                struct rte_mbuf *pkt, uint64_t time)
{
    enum rte_color pkt_color;
    struct trtcm_policer_queue *queue;
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct rte_ether_hdr);
    struct dp_packet *dpkt = CONTAINER_OF(pkt, struct dp_packet, mbuf);

    queue = trtcm_policer_qos_find_queue(policer, dpkt->md.skb_priority);
    if (!queue) {
        /* If no queue is found, use the default queue, which MUST exist. */
        queue = trtcm_policer_qos_find_queue(policer, 0);
        if (!queue) {
            return false;
        }
    }

    pkt_color = rte_meter_trtcm_rfc4115_color_blind_check(&queue->meter,
                                                        &queue->meter_profile,
                                                          time,
                                                          pkt_len);

    if (pkt_color == RTE_COLOR_RED) {
        queue->stats.tx_errors++;
    } else {
        queue->stats.tx_bytes += pkt_len;
        queue->stats.tx_packets++;
    }

    pkt_color = rte_meter_trtcm_rfc4115_color_aware_check(&policer->meter,
                                                     &policer->meter_profile,
                                                     time, pkt_len,
                                                     pkt_color);

    if (pkt_color == RTE_COLOR_RED) {
        policer->stats.tx_errors++;
        return false;
    }

    policer->stats.tx_bytes += pkt_len;
    policer->stats.tx_packets++;
    return true;
}
static int
trtcm_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt,
                  bool should_steal)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];

        if (trtcm_policer_run_single_packet(policer, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            if (should_steal) {
                rte_pktmbuf_free(pkt);
            }
        }
    }
    return cnt;
}
static int
trtcm_policer_qos_queue_construct(const struct smap *details,
                                  uint32_t queue_id, struct qos_conf *conf)
{
    int err = 0;
    struct trtcm_policer_queue *queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    queue = trtcm_policer_qos_find_queue(policer, queue_id);
    if (!queue) {
        queue = xmalloc(sizeof *queue);
        queue->queue_id = queue_id;
        memset(&queue->stats, 0, sizeof queue->stats);
        queue->stats.created = time_msec();
        hmap_insert(&policer->queues, &queue->hmap_node,
                    hash_2words(queue_id, 0));
    }
    if (queue_id == 0 && smap_is_empty(details)) {
        /* No default queue configured, use port values. */
        memcpy(&queue->meter_params, &policer->meter_params,
               sizeof queue->meter_params);
    } else {
        trtcm_policer_details_to_param(details, &queue->meter_params);
    }

    err = rte_meter_trtcm_rfc4115_profile_config(&queue->meter_profile,
                                                 &queue->meter_params);
    if (!err) {
        err = rte_meter_trtcm_rfc4115_config(&queue->meter,
                                             &queue->meter_profile);
    }
    if (err) {
        hmap_remove(&policer->queues, &queue->hmap_node);
        free(queue);
        err = -err;
    }
    return err;
}
static void
trtcm_policer_qos_queue_destruct(struct qos_conf *conf, uint32_t queue_id)
{
    struct trtcm_policer_queue *queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    queue = trtcm_policer_qos_find_queue(policer, queue_id);
    if (queue) {
        hmap_remove(&policer->queues, &queue->hmap_node);
        free(queue);
    }
}

static int
trtcm_policer_qos_queue_get(struct smap *details, uint32_t queue_id,
                            const struct qos_conf *conf)
{
    struct trtcm_policer_queue *queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    queue = trtcm_policer_qos_find_queue(policer, queue_id);
    if (!queue) {
        return EINVAL;
    }

    trtcm_policer_param_to_detail(&queue->meter_params, details);
    return 0;
}

static int
trtcm_policer_qos_queue_get_stats(const struct qos_conf *conf,
                                  uint32_t queue_id,
                                  struct netdev_queue_stats *stats)
{
    struct trtcm_policer_queue *queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    queue = trtcm_policer_qos_find_queue(policer, queue_id);
    if (!queue) {
        return EINVAL;
    }
    memcpy(stats, &queue->stats, sizeof *stats);
    return 0;
}
static int
trtcm_policer_qos_queue_dump_state_init(const struct qos_conf *conf,
                                        struct netdev_dpdk_queue_state *state)
{
    uint32_t i = 0;
    struct trtcm_policer_queue *queue;
    struct trtcm_policer *policer = CONTAINER_OF(conf, struct trtcm_policer,
                                                 qos_conf);

    state->n_queues = hmap_count(&policer->queues);
    state->cur_queue = 0;
    state->queues = xmalloc(state->n_queues * sizeof *state->queues);

    HMAP_FOR_EACH (queue, hmap_node, &policer->queues) {
        state->queues[i++] = queue->queue_id;
    }
    return 0;
}
static const struct dpdk_qos_ops trtcm_policer_ops = {
    .qos_name = "trtcm-policer",
    .qos_construct = trtcm_policer_qos_construct,
    .qos_destruct = trtcm_policer_qos_destruct,
    .qos_get = trtcm_policer_qos_get,
    .qos_is_equal = trtcm_policer_qos_is_equal,
    .qos_run = trtcm_policer_run,
    .qos_queue_construct = trtcm_policer_qos_queue_construct,
    .qos_queue_destruct = trtcm_policer_qos_queue_destruct,
    .qos_queue_get = trtcm_policer_qos_queue_get,
    .qos_queue_get_stats = trtcm_policer_qos_queue_get_stats,
    .qos_queue_dump_state_init = trtcm_policer_qos_queue_dump_state_init
};
static int
netdev_dpdk_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dev->mutex);

    if (netdev->n_txq == dev->requested_n_txq
        && netdev->n_rxq == dev->requested_n_rxq
        && dev->mtu == dev->requested_mtu
        && dev->lsc_interrupt_mode == dev->requested_lsc_interrupt_mode
        && dev->rxq_size == dev->requested_rxq_size
        && dev->txq_size == dev->requested_txq_size
        && eth_addr_equals(dev->hwaddr, dev->requested_hwaddr)
        && dev->socket_id == dev->requested_socket_id
        && dev->started && !dev->reset_needed) {
        /* Reconfiguration is unnecessary. */
        goto out;
    }

    if (dev->reset_needed) {
        rte_eth_dev_reset(dev->port_id);
        if_notifier_manual_report();
        dev->reset_needed = false;
    } else {
        rte_eth_dev_stop(dev->port_id);
    }

    dev->started = false;

    err = netdev_dpdk_mempool_configure(dev);
    if (err && err != EEXIST) {
        goto out;
    }

    dev->lsc_interrupt_mode = dev->requested_lsc_interrupt_mode;

    netdev->n_txq = dev->requested_n_txq;
    netdev->n_rxq = dev->requested_n_rxq;

    dev->rxq_size = dev->requested_rxq_size;
    dev->txq_size = dev->requested_txq_size;

    rte_free(dev->tx_q);

    if (!eth_addr_equals(dev->hwaddr, dev->requested_hwaddr)) {
        err = netdev_dpdk_set_etheraddr__(dev, dev->requested_hwaddr);
        if (err) {
            goto out;
        }
    }

    err = dpdk_eth_dev_init(dev);
    if (dev->hw_ol_features & NETDEV_TX_TSO_OFFLOAD) {
        netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_TSO;
        netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM;
        netdev->ol_flags |= NETDEV_TX_OFFLOAD_UDP_CKSUM;
        netdev->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM;
        if (dev->hw_ol_features & NETDEV_TX_SCTP_CHECKSUM_OFFLOAD) {
            netdev->ol_flags |= NETDEV_TX_OFFLOAD_SCTP_CKSUM;
        }
    }

    /* If both requested and actual hwaddr were previously
     * unset (initialized to 0), then first device init above
     * will have set actual hwaddr to something new.
     * This would trigger spurious MAC reconfiguration unless
     * the requested MAC is kept in sync.
     *
     * This is harmless in case requested_hwaddr was
     * configured by the user, as netdev_dpdk_set_etheraddr__()
     * will have succeeded to get to this point.
     */
    dev->requested_hwaddr = dev->hwaddr;

    dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq);
    if (!dev->tx_q) {
        err = ENOMEM;
    }

    netdev_change_seq_changed(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
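/* Illustrative commands that update a 'requested_*' field and thereby drive
 * a pass through netdev_dpdk_reconfigure(); the port name and values are
 * examples only:
 *
 *     $ ovs-vsctl set Interface dpdk0 options:n_rxq=4
 *     $ ovs-vsctl set Interface dpdk0 mtu_request=9000
 */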
static int
dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    dev->up.n_txq = dev->requested_n_txq;
    dev->up.n_rxq = dev->requested_n_rxq;

    /* Always keep RX queue 0 enabled for implementations that won't
     * report vring states. */
    dev->vhost_rxq_enabled[0] = true;

    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    if (userspace_tso_enabled()) {
        dev->hw_ol_features |= NETDEV_TX_TSO_OFFLOAD;
        VLOG_DBG("%s: TSO enabled on vhost port",
                 netdev_get_name(&dev->up));
    }

    netdev_dpdk_remap_txqs(dev);

    err = netdev_dpdk_mempool_configure(dev);
    if (!err) {
        /* A new mempool was created or re-used. */
        netdev_change_seq_changed(&dev->up);
    } else if (err != EEXIST) {
        return err;
    }

    if (netdev_dpdk_get_vid(dev) >= 0) {
        if (dev->vhost_reconfigured == false) {
            dev->vhost_reconfigured = true;
            /* Carrier status may need updating. */
            netdev_change_seq_changed(&dev->up);
        }
    }

    return 0;
}
static int
netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;

    ovs_mutex_lock(&dev->mutex);
    err = dpdk_vhost_reconfigure_helper(dev);
    ovs_mutex_unlock(&dev->mutex);

    return err;
}
static int
netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;
    uint64_t vhost_flags = 0;
    uint64_t vhost_unsup_flags;

    ovs_mutex_lock(&dev->mutex);

    /* Configure vHost client mode if requested and if the following criteria
     * are met:
     *  1. Device hasn't been registered yet.
     *  2. A path has been specified.
     */
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT) && dev->vhost_id) {
        /* Register client-mode device. */
        vhost_flags |= RTE_VHOST_USER_CLIENT;

        /* There is no support for multi-segments buffers. */
        vhost_flags |= RTE_VHOST_USER_LINEARBUF_SUPPORT;

        /* Enable IOMMU support, if explicitly requested. */
        if (dpdk_vhost_iommu_enabled()) {
            vhost_flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        /* Enable POSTCOPY support, if explicitly requested. */
        if (dpdk_vhost_postcopy_enabled()) {
            vhost_flags |= RTE_VHOST_USER_POSTCOPY_SUPPORT;
        }

        /* Enable External Buffers if TCP Segmentation Offload is enabled. */
        if (userspace_tso_enabled()) {
            vhost_flags |= RTE_VHOST_USER_EXTBUF_SUPPORT;
        }

        err = rte_vhost_driver_register(dev->vhost_id, vhost_flags);
        if (err) {
            VLOG_ERR("vhost-user device setup failure for device %s\n",
                     dev->vhost_id);
            goto unlock;
        } else {
            /* Configuration successful. */
            dev->vhost_driver_flags |= vhost_flags;
            VLOG_INFO("vHost User device '%s' created in 'client' mode, "
                      "using client socket '%s'",
                      dev->up.name, dev->vhost_id);
        }

        err = rte_vhost_driver_callback_register(dev->vhost_id,
                                                 &virtio_net_device_ops);
        if (err) {
            VLOG_ERR("rte_vhost_driver_callback_register failed for "
                     "vhost user client port: %s\n", dev->up.name);
            goto unlock;
        }

        if (userspace_tso_enabled()) {
            netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_TSO;
            netdev->ol_flags |= NETDEV_TX_OFFLOAD_TCP_CKSUM;
            netdev->ol_flags |= NETDEV_TX_OFFLOAD_UDP_CKSUM;
            netdev->ol_flags |= NETDEV_TX_OFFLOAD_SCTP_CKSUM;
            netdev->ol_flags |= NETDEV_TX_OFFLOAD_IPV4_CKSUM;
            vhost_unsup_flags = 1ULL << VIRTIO_NET_F_HOST_ECN
                                | 1ULL << VIRTIO_NET_F_HOST_UFO;
        } else {
            /* This disables checksum offloading and all the features
             * that depend on it (TSO, UFO, ECN) according to the virtio
             * specification. */
            vhost_unsup_flags = 1ULL << VIRTIO_NET_F_CSUM;
        }

        err = rte_vhost_driver_disable_features(dev->vhost_id,
                                                vhost_unsup_flags);
        if (err) {
            VLOG_ERR("rte_vhost_driver_disable_features failed for "
                     "vhost user client port: %s\n", dev->up.name);
            goto unlock;
        }

        err = rte_vhost_driver_start(dev->vhost_id);
        if (err) {
            VLOG_ERR("rte_vhost_driver_start failed for vhost user "
                     "client port: %s\n", dev->up.name);
            goto unlock;
        }
    }

    err = dpdk_vhost_reconfigure_helper(dev);

unlock:
    ovs_mutex_unlock(&dev->mutex);

    return err;
}
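/* Illustrative creation of a port handled by the function above; the names
 * and socket path are examples only.  OVS acts as the vhost-user client, so
 * the VM side (e.g. QEMU) must create the socket as the server:
 *
 *     $ ovs-vsctl add-port br0 vhostclient0 -- \
 *         set Interface vhostclient0 type=dpdkvhostuserclient \
 *         options:vhost-server-path=/tmp/vhostclient0.sock
 */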
int
netdev_dpdk_get_port_id(struct netdev *netdev)
{
    struct netdev_dpdk *dev;
    int ret = -1;

    if (!is_dpdk_class(netdev->netdev_class)) {
        goto out;
    }

    dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    ret = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);
out:
    return ret;
}

bool
netdev_dpdk_flow_api_supported(struct netdev *netdev)
{
    struct netdev_dpdk *dev;
    bool ret = false;

    if (!is_dpdk_class(netdev->netdev_class)) {
        goto out;
    }

    dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    if (dev->type == DPDK_DEV_ETH) {
        /* TODO: Check if we are able to offload some minimal flow. */
        ret = true;
    }
    ovs_mutex_unlock(&dev->mutex);
out:
    return ret;
}
int
netdev_dpdk_rte_flow_destroy(struct netdev *netdev,
                             struct rte_flow *rte_flow,
                             struct rte_flow_error *error)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ret;

    ovs_mutex_lock(&dev->mutex);
    ret = rte_flow_destroy(dev->port_id, rte_flow, error);
    ovs_mutex_unlock(&dev->mutex);
    return ret;
}

struct rte_flow *
netdev_dpdk_rte_flow_create(struct netdev *netdev,
                            const struct rte_flow_attr *attr,
                            const struct rte_flow_item *items,
                            const struct rte_flow_action *actions,
                            struct rte_flow_error *error)
{
    struct rte_flow *flow;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    flow = rte_flow_create(dev->port_id, attr, items, actions, error);
    ovs_mutex_unlock(&dev->mutex);
    return flow;
}
int
netdev_dpdk_rte_flow_query_count(struct netdev *netdev,
                                 struct rte_flow *rte_flow,
                                 struct rte_flow_query_count *query,
                                 struct rte_flow_error *error)
{
    struct rte_flow_action_count count = { .shared = 0, .id = 0 };
    const struct rte_flow_action actions[] = {
        {
            .type = RTE_FLOW_ACTION_TYPE_COUNT,
            .conf = &count,
        },
        {
            .type = RTE_FLOW_ACTION_TYPE_END,
        },
    };
    struct netdev_dpdk *dev;
    int ret;

    if (!is_dpdk_class(netdev->netdev_class)) {
        return -1;
    }

    dev = netdev_dpdk_cast(netdev);
    ovs_mutex_lock(&dev->mutex);
    ret = rte_flow_query(dev->port_id, rte_flow, actions, query, error);
    ovs_mutex_unlock(&dev->mutex);
    return ret;
}
#define NETDEV_DPDK_CLASS_COMMON                            \
    .is_pmd = true,                                         \
    .alloc = netdev_dpdk_alloc,                             \
    .dealloc = netdev_dpdk_dealloc,                         \
    .get_config = netdev_dpdk_get_config,                   \
    .get_numa_id = netdev_dpdk_get_numa_id,                 \
    .set_etheraddr = netdev_dpdk_set_etheraddr,             \
    .get_etheraddr = netdev_dpdk_get_etheraddr,             \
    .get_mtu = netdev_dpdk_get_mtu,                         \
    .set_mtu = netdev_dpdk_set_mtu,                         \
    .get_ifindex = netdev_dpdk_get_ifindex,                 \
    .get_carrier_resets = netdev_dpdk_get_carrier_resets,   \
    .set_miimon_interval = netdev_dpdk_set_miimon,          \
    .set_policing = netdev_dpdk_set_policing,               \
    .get_qos_types = netdev_dpdk_get_qos_types,             \
    .get_qos = netdev_dpdk_get_qos,                         \
    .set_qos = netdev_dpdk_set_qos,                         \
    .get_queue = netdev_dpdk_get_queue,                     \
    .set_queue = netdev_dpdk_set_queue,                     \
    .delete_queue = netdev_dpdk_delete_queue,               \
    .get_queue_stats = netdev_dpdk_get_queue_stats,         \
    .queue_dump_start = netdev_dpdk_queue_dump_start,       \
    .queue_dump_next = netdev_dpdk_queue_dump_next,         \
    .queue_dump_done = netdev_dpdk_queue_dump_done,         \
    .update_flags = netdev_dpdk_update_flags,               \
    .rxq_alloc = netdev_dpdk_rxq_alloc,                     \
    .rxq_construct = netdev_dpdk_rxq_construct,             \
    .rxq_destruct = netdev_dpdk_rxq_destruct,               \
    .rxq_dealloc = netdev_dpdk_rxq_dealloc

#define NETDEV_DPDK_CLASS_BASE                          \
    NETDEV_DPDK_CLASS_COMMON,                           \
    .init = netdev_dpdk_class_init,                     \
    .destruct = netdev_dpdk_destruct,                   \
    .set_tx_multiq = netdev_dpdk_set_tx_multiq,         \
    .get_carrier = netdev_dpdk_get_carrier,             \
    .get_stats = netdev_dpdk_get_stats,                 \
    .get_custom_stats = netdev_dpdk_get_custom_stats,   \
    .get_features = netdev_dpdk_get_features,           \
    .get_status = netdev_dpdk_get_status,               \
    .reconfigure = netdev_dpdk_reconfigure,             \
    .rxq_recv = netdev_dpdk_rxq_recv
static const struct netdev_class dpdk_class = {
    .type = "dpdk",
    NETDEV_DPDK_CLASS_BASE,
    .construct = netdev_dpdk_construct,
    .set_config = netdev_dpdk_set_config,
    .send = netdev_dpdk_eth_send,
};

static const struct netdev_class dpdk_vhost_class = {
    .type = "dpdkvhostuser",
    NETDEV_DPDK_CLASS_COMMON,
    .construct = netdev_dpdk_vhost_construct,
    .destruct = netdev_dpdk_vhost_destruct,
    .send = netdev_dpdk_vhost_send,
    .get_carrier = netdev_dpdk_vhost_get_carrier,
    .get_stats = netdev_dpdk_vhost_get_stats,
    .get_custom_stats = netdev_dpdk_get_sw_custom_stats,
    .get_status = netdev_dpdk_vhost_user_get_status,
    .reconfigure = netdev_dpdk_vhost_reconfigure,
    .rxq_recv = netdev_dpdk_vhost_rxq_recv,
    .rxq_enabled = netdev_dpdk_vhost_rxq_enabled,
};

static const struct netdev_class dpdk_vhost_client_class = {
    .type = "dpdkvhostuserclient",
    NETDEV_DPDK_CLASS_COMMON,
    .construct = netdev_dpdk_vhost_client_construct,
    .destruct = netdev_dpdk_vhost_destruct,
    .set_config = netdev_dpdk_vhost_client_set_config,
    .send = netdev_dpdk_vhost_send,
    .get_carrier = netdev_dpdk_vhost_get_carrier,
    .get_stats = netdev_dpdk_vhost_get_stats,
    .get_custom_stats = netdev_dpdk_get_sw_custom_stats,
    .get_status = netdev_dpdk_vhost_user_get_status,
    .reconfigure = netdev_dpdk_vhost_client_reconfigure,
    .rxq_recv = netdev_dpdk_vhost_rxq_recv,
    .rxq_enabled = netdev_dpdk_vhost_rxq_enabled,
};

void
netdev_dpdk_register(void)
{
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_vhost_class);
    netdev_register_provider(&dpdk_vhost_client_class);
}