/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "openvswitch/shash.h"
#include "unaligned.h"

#include "rte_config.h"
#include "rte_meter.h"
#include "rte_pdump.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (ETHER_HDR_LEN + ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len)                    \
                                     - ETHER_HDR_LEN - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              (MTU_TO_MAX_FRAME_LEN(mtu)      \
                                     + sizeof(struct dp_packet)    \
                                     + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN      1024
#define NETDEV_DPDK_MAX_PKT_LEN     9728
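
/* Worked example for a standard 1500-byte MTU (assuming the usual values
 * ETHER_HDR_LEN 14, ETHER_CRC_LEN 4, VLAN_HEADER_LEN 4 and
 * RTE_PKTMBUF_HEADROOM 128):
 *
 *   MTU_TO_FRAME_LEN(1500)     = 1500 + 14 + 4           = 1518
 *   MTU_TO_MAX_FRAME_LEN(1500) = 1500 + (14 + 4 + 2 * 4) = 1526
 *   MBUF_SIZE(1500)            = 1526 + sizeof(struct dp_packet) + 128
 *
 * so each mbuf reserves room for the largest QinQ frame plus the OVS packet
 * metadata and the DPDK headroom. */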
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ.  This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
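
/* With the values above, the allocation attempts walk the sequence
 * 262144, 131072, 65536, 32768, 16384 (= MIN_NB_MBUF); the asserts
 * guarantee that every step of this halving stays a whole number and that
 * the smallest attempt is a multiple of MP_CACHE_SZ.  A minimal sketch of
 * the retry loop used later in dpdk_mp_get():
 *
 *     unsigned n = MAX_NB_MBUF;
 *     do {
 *         mp = rte_mempool_create(..., n, ...);
 *     } while (!mp && rte_errno == ENOMEM && (n /= 2) >= MIN_NB_MBUF);
 */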
/*
 * DPDK XSTATS Counter names definition
 */
#define XSTAT_RX_64_PACKETS              "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS       "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS      "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS      "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS     "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS    "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS     "rx_size_1523_to_max_packets"

#define XSTAT_TX_64_PACKETS              "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS       "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS      "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS      "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS     "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS    "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS     "tx_size_1523_to_max_packets"

#define XSTAT_TX_MULTICAST_PACKETS       "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS       "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS       "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS       "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS         "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS       "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS           "rx_jabber_errors"
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

#define OVS_VHOST_MAX_QUEUE_NUM 1024     /* Maximum number of vHost TX
                                          * queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and
                                          * not yet mapped to another queue. */

static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */

#define VHOST_ENQ_RETRY_NUM 8
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Quality of Service */

/* An instance of a QoS configuration.  Always associated with a particular
 * network device.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted.  All of them must be provided,
 * except where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct the QoS implementation on 'netdev'.  The
     * implementation should make the appropriate calls to configure QoS
     * according to 'details'.  The implementation may assume that any current
     * QoS configuration already installed should be destroyed before
     * constructing the new configuration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets 'netdev->qos_conf'
     * to an initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_construct)(struct netdev *netdev, const struct smap *details);

    /* Destroys the data structures allocated by the implementation as part of
     * 'qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);

    /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)). */
    int (*qos_get)(const struct netdev *netdev, struct smap *details);

    /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
     * required calls to complete the reconfiguration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function may be null if 'qos_conf' is not configurable. */
    int (*qos_set)(struct netdev *netdev, const struct smap *details);

    /* Modifies an array of rte_mbufs.  The modification is specific to
     * each QoS implementation.
     *
     * The function takes an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a QoS modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array.  This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
                   int pkt_cnt);
};
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'concurrent_txq'). */
    int map;                       /* Mapping of configured vhost-user queues
                                    * to enabled by guest. */
};
/* dpdk has no way to remove dpdk ring ethernet devices
   so we have to keep them around once they've been created
*/
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;           /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    rte_spinlock_t policer_lock;
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio identifier for vhost devices */
    ovsrcu_index vid;

    /* True if vHost device is 'up' and has been reconfigured at least once */
    bool vhost_reconfigured;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    struct qos_conf *qos_conf;
    rte_spinlock_t qos_lock;

    /* The following properties cannot be changed when a device is running,
     * so we remember the request and update them next time
     * netdev_dpdk*_reconfigure() is called */
    int requested_mtu;
    int requested_n_txq;
    int requested_n_rxq;

    /* Socket ID detected when vHost device is brought up */
    int requested_socket_id;

    /* Denotes whether vHost port is client/server mode */
    uint64_t vhost_driver_flags;

    /* Ingress Policer */
    OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
    uint32_t policer_rate;
    uint32_t policer_burst;

    /* DPDK-ETH Flow control */
    struct rte_eth_fc_conf fc_conf;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less.  If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety.  Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames).  If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance.  To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                     NETDEV_DPDK_MBUF_ALIGN);
}
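
/* For example, with mtu 1500 (again assuming a 14-byte Ethernet header,
 * 4-byte CRC, two 4-byte VLAN tags and 128 bytes of headroom):
 * 1526 + 128 = 1654, which rounds up to the next multiple of
 * NETDEV_DPDK_MBUF_ALIGN, i.e. 2048 bytes per RX buffer. */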
/* XXX: use dpdk malloc for entire OVS. in fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;
    struct rte_pktmbuf_pool_private mbp_priv;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);
    /* XXX: this is a really rough method of provisioning memory.
     * It's impossible to determine what the exact memory requirements are
     * when the number of ports and rxqs that utilize a particular mempool
     * can change dynamically at runtime.  For the moment, use this rough
     * heuristic. */
    if (mtu >= ETHER_MTU) {
        mp_size = MAX_NB_MBUF;
    } else {
        mp_size = MIN_NB_MBUF;
    }

    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    } else {
        VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);
    }

    ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
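
/* Minimal usage sketch (hypothetical caller): every user of a mempool takes
 * a reference with dpdk_mp_get() and drops it with dpdk_mp_put(), so pools
 * are shared per (socket_id, mtu) pair and freed with the last reference:
 *
 *     struct dpdk_mp *mp = dpdk_mp_get(SOCKET0, ETHER_MTU);
 *     if (mp) {
 *         ... allocate mbufs from mp->mp ...
 *         dpdk_mp_put(mp);
 *     }
 */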
static void
dpdk_mp_put(struct dpdk_mp *dmp) OVS_REQUIRES(dpdk_mutex)
{
    if (!dmp) {
        return;
    }

    ovs_assert(dmp->refcount);

    if (!--dmp->refcount) {
        ovs_list_remove(&dmp->list_node);
        rte_mempool_free(dmp->mp);
        rte_free(dmp);
    }
}
/* Tries to allocate new mempool on requested_socket_id with
 * mbuf size corresponding to requested_mtu.
 * On success new configuration will be applied.
 * On error, device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dpdk_mutex)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
    struct dpdk_mp *mp;

    mp = dpdk_mp_get(dev->requested_socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!mp) {
        VLOG_ERR("Insufficient memory to create memory pool for netdev "
                 "%s, with MTU %d on socket %d\n",
                 dev->up.name, dev->requested_mtu, dev->requested_socket_id);
        return ENOMEM;
    } else {
        dpdk_mp_put(dev->dpdk_mp);
        dev->dpdk_mp = mp;
        dev->mtu = dev->requested_mtu;
        dev->socket_id = dev->requested_socket_id;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    }

    return 0;
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;
    struct rte_eth_conf conf = port_conf;

    if (dev->mtu > ETHER_MTU) {
        conf.rxmode.jumbo_frame = 1;
        conf.rxmode.max_rx_pkt_len = dev->max_packet_len;
    } else {
        conf.rxmode.jumbo_frame = 0;
        conf.rxmode.max_rx_pkt_len = 0;
    }

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV):  rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request less queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
        if (diag) {
            VLOG_WARN("Interface %s eth_dev setup error %s\n",
                      dev->up.name, rte_strerror(-diag));
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with less tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with less rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag ? -diag : 0;
}
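
/* Retry trace (hypothetical): requesting (rxq:4 txq:4) on a NIC that only
 * makes 2 TX queues available fails at i == 2, so the loop retries with
 * (rxq:4 txq:2); if the RX setup then also fails at i == 2, the final
 * successful configuration is (rxq:2 txq:2). */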
static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
    if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
        VLOG_WARN("Failed to enable flow control on device %d", dev->port_id);
    }
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    /* Get the Flow control configuration for DPDK-ETH */
    diag = rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
    if (diag) {
        VLOG_DBG("cannot get flow control parameters on port=%d, err=%d",
                 dev->port_id, diag);
    }

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}
static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    if (!rte_eal_init_ret) { /* Only after successful initialization */
        dev = dpdk_rte_mzalloc(sizeof *dev);
        if (dev) {
            return &dev->up;
        }
    }
    return NULL;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *dev, unsigned int n_txqs)
{
    unsigned i;

    dev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *dev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        /* Initialize map for vhost devices. */
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
        rte_spinlock_init(&dev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int sid;
    int err = 0;

    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;

    dev->requested_mtu = dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    ovsrcu_index_init(&dev->vid, -1);
    dev->vhost_reconfigured = false;

    err = netdev_dpdk_mempool_configure(dev);
    if (err) {
        goto unlock;
    }

    /* Initialise QoS configuration to NULL and qos lock to unlocked */
    dev->qos_conf = NULL;
    rte_spinlock_init(&dev->qos_lock);

    /* Initialise rcu pointer for ingress policer to NULL */
    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_rxq = NR_QUEUE;
    netdev->n_txq = NR_QUEUE;
    dev->requested_n_rxq = netdev->n_rxq;
    dev->requested_n_txq = netdev->n_txq;

    /* Initialize the flow control to NULL */
    memset(&dev->fc_conf, 0, sizeof dev->fc_conf);
    if (type == DPDK_DEV_ETH) {
        err = dpdk_eth_dev_init(dev);
        if (err) {
            goto unlock;
        }
        netdev_dpdk_alloc_txq(dev, netdev->n_txq);
    } else {
        netdev_dpdk_alloc_txq(dev, OVS_VHOST_MAX_QUEUE_NUM);
        /* Enable DPDK_DEV_VHOST device and set promiscuous mode flag. */
        dev->flags = NETDEV_UP | NETDEV_PROMISC;
    }

    ovs_list_push_back(&dpdk_list, &dev->list_node);

unlock:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
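
/* For example, "dpdk7" with prefix "dpdk" yields *port_no == 7, while
 * "dpdk-1", "dpdk+1" or "myport0" are rejected with ENODEV. */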
static int
netdev_dpdk_vhost_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system.  '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(dev->vhost_id, sizeof dev->vhost_id, "%s/%s",
             vhost_sock_dir, name);

    dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;
    err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 dev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
    }
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_client_construct(struct netdev *netdev)
{
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_eth_dev_stop(dev->port_id);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
}
/* rte_vhost_driver_unregister() can call back destroy_device(), which will
 * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'.  To avoid a
 * deadlock, none of the mutexes must be held while calling this function. */
static int
dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
                             char *vhost_id)
    OVS_EXCLUDED(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    return rte_vhost_driver_unregister(vhost_id);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    char *vhost_id;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_vid(dev) >= 0
        && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.", dev->vhost_id);
    }

    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    vhost_id = xstrdup(dev->vhost_id);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    if (dpdk_vhost_driver_unregister(dev, vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", vhost_id);
    } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        /* OVS server mode - remove this socket from list for deletion */
        fatal_signal_remove_file_to_unlink(vhost_id);
    }
    free(vhost_id);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "mtu", "%d", dev->mtu);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args)
    OVS_REQUIRES(dev->mutex)
{
    int new_n_rxq;

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", dev->requested_n_rxq), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(&dev->up);
    }
}
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    dpdk_set_rxq_config(dev, args);

    /* Flow control support is only available for DPDK Ethernet ports. */
    bool rx_fc_en = false;
    bool tx_fc_en = false;
    enum rte_eth_fc_mode fc_mode_set[2][2] =
        {{RTE_FC_NONE,     RTE_FC_TX_PAUSE},
         {RTE_FC_RX_PAUSE, RTE_FC_FULL    }
        };
    rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
    tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
    dev->fc_conf.autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);
    dev->fc_conf.mode = fc_mode_set[tx_fc_en][rx_fc_en];

    dpdk_eth_flow_ctrl_setup(dev);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
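
/* The fc_mode_set matrix above is indexed [tx_fc_en][rx_fc_en], so for
 * example leaving both options false selects fc_mode_set[0][0] ==
 * RTE_FC_NONE, and enabling both selects fc_mode_set[1][1] == RTE_FC_FULL. */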
static int
netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_set_rxq_config(dev, args);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_client_set_config(struct netdev *netdev,
                                    const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *path;

    ovs_mutex_lock(&dev->mutex);
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        path = smap_get(args, "vhost-server-path");
        if (path && strcmp(path, dev->vhost_id)) {
            strcpy(dev->vhost_id, path);
            netdev_request_reconfigure(netdev);
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}
/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
*dev
, int qid
,
1202 struct rte_mbuf
**pkts
, int cnt
)
1206 while (nb_tx
!= cnt
) {
1209 ret
= rte_eth_tx_burst(dev
->port_id
, qid
, pkts
+ nb_tx
, cnt
- nb_tx
);
1217 if (OVS_UNLIKELY(nb_tx
!= cnt
)) {
1218 /* free buffers, which we couldn't transmit, one at a time (each
1219 * packet could come from a different mempool) */
1222 for (i
= nb_tx
; i
< cnt
; i
++) {
1223 rte_pktmbuf_free(pkts
[i
]);
1225 rte_spinlock_lock(&dev
->stats_lock
);
1226 dev
->stats
.tx_dropped
+= cnt
- nb_tx
;
1227 rte_spinlock_unlock(&dev
->stats_lock
);
static inline bool
netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
                               struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}
static int
netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
                        struct rte_mbuf **pkts, int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet */
        if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}
static int
ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
                    int pkt_cnt)
{
    int cnt = 0;

    rte_spinlock_lock(&policer->policer_lock);
    cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts, pkt_cnt);
    rte_spinlock_unlock(&policer->policer_lock);

    return cnt;
}
static bool
is_vhost_running(struct netdev_dpdk *dev)
{
    return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
}
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
static void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count,
                                     int dropped)
{
    int i;
    unsigned int packet_size;
    struct dp_packet *packet;

    stats->rx_packets += count;
    stats->rx_dropped += dropped;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet_batch *batch)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    int qid = rxq->queue_id;
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t nb_rx = 0;
    uint16_t dropped = 0;

    if (OVS_UNLIKELY(!is_vhost_running(dev)
                     || !(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(netdev_dpdk_get_vid(dev),
                                    qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) batch->packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
                                         nb_rx, dropped);
    rte_spinlock_unlock(&dev->stats_lock);

    batch->count = (int) nb_rx;
    return 0;
}
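
/* Virtqueues come in guest (RX, TX) pairs, so OVS rx queue 'qid' maps to
 * virtqueue index qid * VIRTIO_QNUM + VIRTIO_TXQ: e.g. with VIRTIO_QNUM 2
 * and VIRTIO_TXQ 1, rx queue 1 dequeues from the guest's TX virtqueue 3. */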
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    int nb_rx;
    int dropped = 0;

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) batch->packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    /* Update stats to reflect dropped packets */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    batch->count = nb_rx;

    return 0;
}
static inline int
netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                      int cnt)
{
    struct netdev *netdev = &dev->up;

    if (dev->qos_conf != NULL) {
        rte_spinlock_lock(&dev->qos_lock);
        if (dev->qos_conf != NULL) {
            cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
        }
        rte_spinlock_unlock(&dev->qos_lock);
    }

    return cnt;
}
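
/* The double check of 'qos_conf' above is the classic check/lock/re-check
 * pattern: the unlocked test keeps the common no-QoS case free of lock
 * operations, while the re-check under 'qos_lock' is the authoritative one.
 * In sketch form:
 *
 *     if (conf) {            // cheap, possibly racy fast path
 *         lock();
 *         if (conf) {        // authoritative check under the lock
 *             use(conf);
 *         }
 *         unlock();
 *     }
 */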
static int
netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                              int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt;

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        if (OVS_UNLIKELY(pkt->pkt_len > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " max_packet_len %d",
                         dev->up.name, pkt->pkt_len, dev->max_packet_len);
            rte_pktmbuf_free(pkt);
            continue;
        }

        if (OVS_UNLIKELY(i != cnt)) {
            pkts[cnt] = pkt;
        }
        cnt++;
    }

    return cnt;
}
static void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int dropped = 0;
    int i, retries = 0;

    qid = dev->tx_q[qid % netdev->n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(dev) || qid < 0
                     || !(dev->flags & NETDEV_UP))) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    /* Check if QoS has been configured for the netdev. */
    cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);
    dropped = total_pkts - cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(netdev_dpdk_get_vid(dev),
                                          vhost_qid, cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent. */
            cnt -= tx_pkts;
            /* Prepare for possible retry. */
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            /* No packets sent - do not retry. */
            break;
        }
    } while (cnt && (retries++ <= VHOST_ENQ_RETRY_NUM));

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
                                         cnt + dropped);
    rte_spinlock_unlock(&dev->stats_lock);

out:
    for (i = 0; i < total_pkts - dropped; i++) {
        dp_packet_delete(pkts[i]);
    }
}
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = batch->count;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    dp_packet_batch_apply_cutlen(batch);

    for (i = 0; i < batch->count; i++) {
        int size = dp_packet_size(batch->packets[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);
            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += batch->count - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(batch->packets[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
                                 newcnt);
    } else {
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);

        dropped += qos_pkts - newcnt;
        netdev_dpdk_eth_tx_burst(dev, qid, mbufs, newcnt);
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                       struct dp_packet_batch *batch,
                       bool may_steal, bool concurrent_txq OVS_UNUSED)
{
    if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        dp_packet_batch_apply_cutlen(batch);
        __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet_batch *batch, bool may_steal,
                   bool concurrent_txq)
{
    if (OVS_UNLIKELY(concurrent_txq)) {
        qid = qid % dev->up.n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     batch->packets[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        int dropped;
        int cnt = batch->count;
        struct rte_mbuf **cur_pkts = (struct rte_mbuf **) batch->packets;

        dp_packet_batch_apply_cutlen(batch);

        cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
        cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);
        dropped = batch->count - cnt;

        netdev_dpdk_eth_tx_burst(dev, qid, cur_pkts, cnt);

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(concurrent_txq)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet_batch *batch, bool may_steal,
                     bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    if (MTU_TO_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
        || mtu < ETHER_MIN_MTU) {
        VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu);
        return EINVAL;
    }

    ovs_mutex_lock(&dev->mutex);
    if (dev->requested_mtu != mtu) {
        dev->requested_mtu = mtu;
        netdev_request_reconfigure(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstat *xstats,
                           const struct rte_eth_xstat_name *names,
                           const unsigned int size)
{
    for (unsigned int i = 0; i < size; i++) {
        if (strcmp(XSTAT_RX_64_PACKETS, names[i].name) == 0) {
            stats->rx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->rx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->rx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->rx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->rx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->rx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->rx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_64_PACKETS, names[i].name) == 0) {
            stats->tx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->tx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->tx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->tx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->tx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->tx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->tx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, names[i].name) == 0) {
            stats->tx_multicast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->rx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->tx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, names[i].name) == 0) {
            stats->rx_undersized_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, names[i].name) == 0) {
            stats->rx_fragmented_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, names[i].name) == 0) {
            stats->rx_jabber_errors = xstats[i].value;
        }
    }
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstat *rte_xstats = NULL;
    struct rte_eth_xstat_name *rte_xstats_names = NULL;
    int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    /* Get length of statistics */
    rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
    if (rte_xstats_len < 0) {
        VLOG_WARN("Cannot get XSTATS values for port: %i", dev->port_id);
        goto out;
    }

    /* Reserve memory for xstats names and values */
    rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
    rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);

    /* Retrieve xstats names */
    rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
                                                  rte_xstats_names,
                                                  rte_xstats_len);
    if (rte_xstats_new_len != rte_xstats_len) {
        VLOG_WARN("Cannot get XSTATS names for port: %i.", dev->port_id);
        goto out;
    }

    /* Retrieve xstats values */
    memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
    rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                        rte_xstats_len);
    if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
        netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
                                   rte_xstats_ret);
    } else {
        VLOG_WARN("Cannot get XSTATS values for port: %i.", dev->port_id);
    }

out:
    free(rte_xstats);
    free(rte_xstats_names);

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped
     * instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
    }

    return 0;
}
static struct ingress_policer *
netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
{
    struct ingress_policer *policer = NULL;
    uint64_t rate_bytes;
    uint64_t burst_bytes;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    rte_spinlock_init(&policer->policer_lock);

    /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
    rate_bytes = rate * 1000 / 8;
    burst_bytes = burst * 1000 / 8;

    policer->app_srtcm_params.cir = rate_bytes;
    policer->app_srtcm_params.cbs = burst_bytes;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->in_policer,
                                 &policer->app_srtcm_params);
    if (err) {
        VLOG_ERR("Could not create rte meter for ingress policer");
        free(policer);
        return NULL;
    }

    return policer;
}
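
/* For example, a 10,000 kbit/s rate with a 1,000 kbit burst becomes
 * cir = 10000 * 1000 / 8 = 1,250,000 bytes/s and
 * cbs = 1000 * 1000 / 8 = 125,000 bytes for the srTCM meter. */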
static int
netdev_dpdk_set_policing(struct netdev *netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;

    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value. */
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
                     : policer_burst);

    ovs_mutex_lock(&dev->mutex);

    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);

    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    }

    /* Destroy any existing ingress policer for the device if one exists */
    if (policer) {
        ovsrcu_postpone(free, policer);
    }

    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    } else {
        policer = NULL;
    }
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    } else {
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
         * update. */
        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(dev)) {
            netdev_change_seq_changed(&dev->up);

            /* Clear statistics if device is getting up. */
            if (NETDEV_UP & on) {
                rte_spinlock_lock(&dev->stats_lock);
                memset(&dev->stats, 0, sizeof (dev->stats));
                rte_spinlock_unlock(&dev->stats_lock);
            }
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);

        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(int vid)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < rte_vhost_get_queue_num(vid); i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_TXQ, 0);
    }
}

/*
 * Fixes mapping for vhost-user tx queues.  Must be called after each
 * enabling/disabling of queues and n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->up.n_txq;

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    }

    rte_free(enabled_queues);
}
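
/*
 * Worked example (illustrative): with up.n_txq == 4 and only queues 0 and 2
 * enabled by the guest (tx_q[0].map == 0, tx_q[2].map == 2), the loop above
 * spreads the disabled slots round-robin over the enabled queues:
 *
 *   0 --> 0,  1 --> 0,  2 --> 2,  3 --> 2
 *
 * With no enabled queue at all, every slot maps to OVS_VHOST_QUEUE_DISABLED.
 */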

/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int newnode = 0;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof(ifname));

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            uint32_t qp_num = rte_vhost_get_queue_num(vid);

            /* Get NUMA information */
            newnode = rte_vhost_get_numa_node(vid);
            if (newnode == -1) {
                VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
                          ifname);
                newnode = dev->socket_id;
            }

            if (dev->requested_n_txq != qp_num
                || dev->requested_n_rxq != qp_num
                || dev->requested_socket_id != newnode) {
                dev->requested_socket_id = newnode;
                dev->requested_n_rxq = qp_num;
                dev->requested_n_txq = qp_num;
                netdev_request_reconfigure(&dev->up);
            } else {
                /* Reconfiguration not required. */
                dev->vhost_reconfigured = true;
            }

            ovsrcu_index_set(&dev->vid, vid);
            exists = true;

            /* Disable notifications. */
            set_irq_status(vid);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' has been added on numa node %i",
              ifname, newnode);

    return 0;
}
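
/*
 * For example (illustrative): a guest that negotiates two queue pairs makes
 * rte_vhost_get_queue_num() return 2, so requested_n_rxq and requested_n_txq
 * both become 2 and a reconfiguration is requested unless the device already
 * has that shape on the right NUMA node.
 */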

/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->up.n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}

/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof(ifname));

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_vid(dev) == vid) {

            ovs_mutex_lock(&dev->mutex);
            dev->vhost_reconfigured = false;
            ovsrcu_index_set(&dev->vid, -1);
            netdev_dpdk_txq_map_clear(dev);
            exists = true;
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' has been removed", ifname);
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
    }
}

static int
vring_state_changed(int vid, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof(ifname));

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            if (enable) {
                dev->tx_q[qid].map = qid;
            } else {
                dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
            }
            netdev_dpdk_remap_txqs(dev);
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' "
                  "changed to '%s'", queue_id, qid, ifname,
                  (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
        return -1;
    }

    return 0;
}
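
/*
 * Example (illustrative): with VIRTIO_QNUM == 2, an enable event for
 * queue_id 4 targets queue pair qid == 2, so tx_q[2].map is set to 2 and the
 * whole tx mapping is recomputed; events on the VIRTIO_TXQ half of a pair
 * are ignored above.
 */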

int
netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
{
    return ovsrcu_index_get(&dev->vid);
}

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
}

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};

static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the vhost thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();

    return NULL;
}

static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
                              | 1ULL << VIRTIO_NET_F_HOST_TSO6
                              | 1ULL << VIRTIO_NET_F_CSUM);
    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);

    return 0;
}

static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);
}
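
/*
 * Usage sketch (assumed): the command registered above is driven through
 * ovs-appctl, e.g.
 *
 *   ovs-appctl netdev-dpdk/set-admin-state dpdk0 up
 *
 * and, when the optional netdev argument is omitted, applies to every
 * DPDK port.
 */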

static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[RTE_RING_NAMESIZE];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}

static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
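
/*
 * Example (illustrative): opening a port named "dpdkr0" parses to
 * port_no == 0; if no ring with that id exists yet, dpdk_ring_create()
 * builds the "dpdkr0_tx" and "dpdkr0_rx" rings and backs them with a DPDK
 * ethdev via rte_eth_from_rings().
 */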

static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
                      struct dp_packet_batch *batch, bool may_steal,
                      bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < batch->count; i++) {
        dp_packet_rss_invalidate(batch->packets[i]);
    }

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}

static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
}

/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations qos_name to name.  Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match is found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }

    return NULL;
}

/*
 * Call qos_destruct to clean up items associated with the netdev's
 * qos_conf.  Set the netdev's qos_conf to NULL.
 */
static void
qos_delete_conf(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_spinlock_lock(&dev->qos_lock);
    if (dev->qos_conf) {
        if (dev->qos_conf->ops->qos_destruct) {
            dev->qos_conf->ops->qos_destruct(netdev, dev->qos_conf);
        }
        dev->qos_conf = NULL;
    }
    rte_spinlock_unlock(&dev->qos_lock);
}

static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }

    return 0;
}

static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    if (dev->qos_conf) {
        *typep = dev->qos_conf->ops->qos_name;
        error = (dev->qos_conf->ops->qos_get
                 ? dev->qos_conf->ops->qos_get(netdev, details) : 0);
    } else {
        /* No QoS configuration set, return an empty string */
        *typep = "";
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}

static int
netdev_dpdk_set_qos(struct netdev *netdev,
                    const char *type, const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    int error = 0;

    /* If type is empty or unsupported then the current QoS configuration
     * for the dpdk-netdev can be destroyed */
    new_ops = qos_lookup_name(type);

    if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
        qos_delete_conf(netdev);
        return EOPNOTSUPP;
    }

    ovs_mutex_lock(&dev->mutex);

    if (dev->qos_conf) {
        if (new_ops == dev->qos_conf->ops) {
            error = new_ops->qos_set ? new_ops->qos_set(netdev, details) : 0;
        } else {
            /* Delete existing QoS configuration. */
            qos_delete_conf(netdev);
            ovs_assert(dev->qos_conf == NULL);

            /* Install new QoS configuration. */
            error = new_ops->qos_construct(netdev, details);
        }
    } else {
        error = new_ops->qos_construct(netdev, details);
    }

    ovs_assert((error == 0) == (dev->qos_conf != NULL));
    if (error) {
        VLOG_ERR("Failed to set QoS type %s on port %s, returned error: %s",
                 type, netdev->name, rte_strerror(-error));
    }

    ovs_mutex_unlock(&dev->mutex);
    return error;
}

/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};

static struct egress_policer *
egress_policer_get__(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return CONTAINER_OF(dev->qos_conf, struct egress_policer, qos_conf);
}

static int
egress_policer_qos_construct(struct netdev *netdev,
                             const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct egress_policer *policer;
    int err = 0;

    rte_spinlock_lock(&dev->qos_lock);
    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    dev->qos_conf = &policer->qos_conf;
    policer->app_srtcm_params.cir = smap_get_ullong(details, "cir", 0);
    policer->app_srtcm_params.cbs = smap_get_ullong(details, "cbs", 0);
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    if (err) {
        /* Error occurred during rte_meter creation, destroy the policer
         * and set the qos configuration for the netdev dpdk to NULL
         */
        free(policer);
        dev->qos_conf = NULL;
        err = -err;
    }
    rte_spinlock_unlock(&dev->qos_lock);

    return err;
}

static void
egress_policer_qos_destruct(struct netdev *netdev OVS_UNUSED,
                            struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}

static int
egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
{
    struct egress_policer *policer = egress_policer_get__(netdev);

    smap_add_format(details, "cir", "%llu",
                    1ULL * policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%llu",
                    1ULL * policer->app_srtcm_params.cbs);

    return 0;
}

static int
egress_policer_qos_set(struct netdev *netdev, const struct smap *details)
{
    struct egress_policer *policer;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    policer = egress_policer_get__(netdev);
    rte_spinlock_lock(&dev->qos_lock);
    policer->app_srtcm_params.cir = smap_get_ullong(details, "cir", 0);
    policer->app_srtcm_params.cbs = smap_get_ullong(details, "cbs", 0);
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    if (err) {
        /* Error occurred during rte_meter creation, destroy the policer
         * and set the qos configuration for the netdev dpdk to NULL
         */
        free(policer);
        dev->qos_conf = NULL;
        err = -err;
    }
    rte_spinlock_unlock(&dev->qos_lock);

    return err;
}

static int
egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts, int pkt_cnt)
{
    int cnt = 0;
    struct egress_policer *policer = egress_policer_get__(netdev);

    cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);

    return cnt;
}

static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_set,
    egress_policer_run
};
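
/*
 * Usage sketch (assumed, mirrors the documented OVS DPDK QoS example): an
 * egress policer is attached to a port through the database, e.g.
 *
 *   ovs-vsctl set port dpdk0 qos=@qos -- \
 *       --id=@qos create qos type=egress-policer \
 *       other-config:cir=46000000 other-config:cbs=2048
 *
 * "cir" and "cbs" end up in the srTCM parameters read by
 * egress_policer_qos_construct() above.
 */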

static int
netdev_dpdk_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    if (netdev->n_txq == dev->requested_n_txq
        && netdev->n_rxq == dev->requested_n_rxq
        && dev->mtu == dev->requested_mtu) {
        /* Reconfiguration is unnecessary */
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    if (dev->mtu != dev->requested_mtu) {
        netdev_dpdk_mempool_configure(dev);
    }

    netdev->n_txq = dev->requested_n_txq;
    netdev->n_rxq = dev->requested_n_rxq;

    rte_free(dev->tx_q);
    err = dpdk_eth_dev_init(dev);
    netdev_dpdk_alloc_txq(dev, netdev->n_txq);

    netdev_change_seq_changed(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}

static void
dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
    OVS_REQUIRES(dpdk_mutex)
    OVS_REQUIRES(dev->mutex)
{
    dev->up.n_txq = dev->requested_n_txq;
    dev->up.n_rxq = dev->requested_n_rxq;

    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    netdev_dpdk_remap_txqs(dev);

    if (dev->requested_socket_id != dev->socket_id
        || dev->requested_mtu != dev->mtu) {
        if (!netdev_dpdk_mempool_configure(dev)) {
            netdev_change_seq_changed(&dev->up);
        }
    }

    if (netdev_dpdk_get_vid(dev) >= 0) {
        dev->vhost_reconfigured = true;
    }
}

static int
netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    dpdk_vhost_reconfigure_helper(dev);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return 0;
}

static int
netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    dpdk_vhost_reconfigure_helper(dev);

    /* Configure vHost client mode if requested and if the following criteria
     * are met:
     *  1. Device hasn't been registered yet.
     *  2. A path has been specified.
     */
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)
            && strlen(dev->vhost_id)) {
        /* Register client-mode device */
        err = rte_vhost_driver_register(dev->vhost_id,
                                        RTE_VHOST_USER_CLIENT);
        if (err) {
            VLOG_ERR("vhost-user device setup failure for device %s\n",
                     dev->vhost_id);
        } else {
            /* Configuration successful */
            dev->vhost_driver_flags |= RTE_VHOST_USER_CLIENT;
            VLOG_INFO("vHost User device '%s' created in 'client' mode, "
                      "using client socket '%s'",
                      dev->up.name, dev->vhost_id);
        }
    }

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return 0;
}

#define NETDEV_DPDK_CLASS(NAME, CONSTRUCT, DESTRUCT,          \
                          SET_CONFIG, SET_TX_MULTIQ, SEND,    \
                          GET_CARRIER, GET_STATS,             \
                          GET_FEATURES, GET_STATUS,           \
                          RECONFIGURE, RXQ_RECV)              \
{                                                             \
    NAME,                                                     \
    true,                       /* is_pmd */                  \
    NULL,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    SET_CONFIG,                                               \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    SET_TX_MULTIQ,                                            \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
    netdev_dpdk_set_policing,                                 \
    netdev_dpdk_get_qos_types,                                \
    NULL,                       /* get_qos_capabilities */    \
    netdev_dpdk_get_qos,                                      \
    netdev_dpdk_set_qos,                                      \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_addr_list */           \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
    netdev_dpdk_update_flags,                                 \
    RECONFIGURE,                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}

static bool
process_vhost_flags(char *flag, char *default_val, int size,
                    const struct smap *ovs_other_config,
                    char **new_val)
{
    const char *val;
    int changed = 0;

    val = smap_get(ovs_other_config, flag);

    /* Process the vhost-sock-dir flag if it is provided, otherwise resort to
     * the default value.
     */
    if (val && (strlen(val) <= size)) {
        changed = 1;
        *new_val = xstrdup(val);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
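
/*
 * For example (illustrative): other_config:vhost-sock-dir=sockets makes this
 * return true with *new_val = "sockets"; dpdk_init__() below then joins it
 * with ovs_rundir() to form the vhost-user socket directory.
 */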

static char **
grow_argv(char ***argv, size_t cur_siz, size_t grow_by)
{
    return xrealloc(*argv, sizeof(char *) * (cur_siz + grow_by));
}

static void
dpdk_option_extend(char ***argv, int argc, const char *option,
                   const char *value)
{
    char **newargv = grow_argv(argv, argc, 2);

    *argv = newargv;
    newargv[argc] = xstrdup(option);
    newargv[argc + 1] = xstrdup(value);
}

static char **
move_argv(char ***argv, size_t cur_size, char **src_argv, size_t src_argc)
{
    char **newargv = grow_argv(argv, cur_size, src_argc);

    while (src_argc--) {
        newargv[cur_size + src_argc] = src_argv[src_argc];
        src_argv[src_argc] = NULL;
    }

    return newargv;
}

static int
extra_dpdk_args(const char *ovs_extra_config, char ***argv, int argc)
{
    int ret = argc;
    char *release_tok = xstrdup(ovs_extra_config);
    char *tok, *endptr = NULL;

    for (tok = strtok_r(release_tok, " ", &endptr); tok != NULL;
         tok = strtok_r(NULL, " ", &endptr)) {
        char **newarg = grow_argv(argv, ret, 1);

        *argv = newarg;
        newarg[ret++] = xstrdup(tok);
    }

    free(release_tok);

    return ret;
}

static bool
argv_contains(char **argv_haystack, const size_t argc_haystack,
              const char *needle)
{
    for (size_t i = 0; i < argc_haystack; ++i) {
        if (!strcmp(argv_haystack[i], needle)) {
            return true;
        }
    }

    return false;
}

static int
construct_dpdk_options(const struct smap *ovs_other_config,
                       char ***argv, const int initial_size,
                       char **extra_args, const size_t extra_argc)
{
    struct dpdk_options_map {
        const char *ovs_configuration;
        const char *dpdk_option;
        bool default_enabled;
        const char *default_value;
    } opts[] = {
        {"dpdk-lcore-mask", "-c", false, NULL},
        {"dpdk-hugepage-dir", "--huge-dir", false, NULL},
    };

    int i, ret = initial_size;

    /* First, construct from the flat options (non-mutex). */
    for (i = 0; i < ARRAY_SIZE(opts); ++i) {
        const char *lookup = smap_get(ovs_other_config,
                                      opts[i].ovs_configuration);
        if (!lookup && opts[i].default_enabled) {
            lookup = opts[i].default_value;
        }

        if (lookup) {
            if (!argv_contains(extra_args, extra_argc, opts[i].dpdk_option)) {
                dpdk_option_extend(argv, ret, opts[i].dpdk_option, lookup);
                ret += 2;
            } else {
                VLOG_WARN("Ignoring database defined option '%s' due to "
                          "dpdk_extras config", opts[i].dpdk_option);
            }
        }
    }

    return ret;
}
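
/*
 * Example (illustrative): other_config:dpdk-lcore-mask=0x6 appends
 * "-c 0x6" to the EAL argv, unless "-c" was already supplied through
 * dpdk-extra, in which case the database value is ignored with a warning.
 */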

#define MAX_DPDK_EXCL_OPTS 10

static int
construct_dpdk_mutex_options(const struct smap *ovs_other_config,
                             char ***argv, const int initial_size,
                             char **extra_args, const size_t extra_argc)
{
    struct dpdk_exclusive_options_map {
        const char *category;
        const char *ovs_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *eal_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *default_value;
        int default_option;
    } excl_opts[] = {
        {"memory type",
         {"dpdk-alloc-mem", "dpdk-socket-mem", NULL,},
         {"-m",             "--socket-mem",    NULL,},
         "1024,0", 1
        },
    };

    int i, ret = initial_size;

    for (i = 0; i < ARRAY_SIZE(excl_opts); ++i) {
        int found_opts = 0, scan, found_pos = -1;
        const char *found_value;
        struct dpdk_exclusive_options_map *popt = &excl_opts[i];

        for (scan = 0; scan < MAX_DPDK_EXCL_OPTS
                 && popt->ovs_dpdk_options[scan]; ++scan) {
            const char *lookup = smap_get(ovs_other_config,
                                          popt->ovs_dpdk_options[scan]);
            if (lookup && strlen(lookup)) {
                found_opts++;
                found_pos = scan;
                found_value = lookup;
            }
        }

        if (!found_opts) {
            if (popt->default_option) {
                found_pos = popt->default_option;
                found_value = popt->default_value;
            } else {
                continue;
            }
        }

        if (found_opts > 1) {
            VLOG_ERR("Multiple defined options for %s. Please check your"
                     " database settings and reconfigure if necessary.",
                     popt->category);
        }

        if (!argv_contains(extra_args, extra_argc,
                           popt->eal_dpdk_options[found_pos])) {
            dpdk_option_extend(argv, ret, popt->eal_dpdk_options[found_pos],
                               found_value);
            ret += 2;
        } else {
            VLOG_WARN("Ignoring database defined option '%s' due to "
                      "dpdk_extras config",
                      popt->eal_dpdk_options[found_pos]);
        }
    }

    return ret;
}

static int
get_dpdk_args(const struct smap *ovs_other_config, char ***argv,
              int argc)
{
    const char *extra_configuration;
    char **extra_args = NULL;
    int i;
    size_t extra_argc = 0;

    extra_configuration = smap_get(ovs_other_config, "dpdk-extra");
    if (extra_configuration) {
        extra_argc = extra_dpdk_args(extra_configuration, &extra_args, 0);
    }

    i = construct_dpdk_options(ovs_other_config, argv, argc, extra_args,
                               extra_argc);
    i = construct_dpdk_mutex_options(ovs_other_config, argv, i, extra_args,
                                     extra_argc);

    if (extra_configuration) {
        *argv = move_argv(argv, i, extra_args, extra_argc);
    }

    return i + extra_argc;
}
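
/*
 * Example (illustrative, assuming the defaults reconstructed above): with
 * only dpdk-lcore-mask=0x2 set in the database, the EAL vector built here
 * is roughly
 *
 *   <program> -c 0x2 --socket-mem 1024,0
 *
 * since the exclusive "memory type" option falls back to its default.
 */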

static char **dpdk_argv;
static int dpdk_argc;

static void
deferred_argv_release(void)
{
    int result;

    for (result = 0; result < dpdk_argc; ++result) {
        free(dpdk_argv[result]);
    }

    free(dpdk_argv);
}

static void
dpdk_init__(const struct smap *ovs_other_config)
{
    char **argv = NULL;
    int result;
    int argc, argc_tmp;
    bool auto_determine = true;
    int err = 0;
    cpu_set_t cpuset;
    char *sock_dir_subcomponent;

    if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
        VLOG_INFO("DPDK Disabled - to change this requires a restart.\n");
        return;
    }

    VLOG_INFO("DPDK Enabled, initializing");
    if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
                            NAME_MAX, ovs_other_config,
                            &sock_dir_subcomponent)) {
        struct stat s;

        if (!strstr(sock_dir_subcomponent, "..")) {
            vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
                                       sock_dir_subcomponent);

            err = stat(vhost_sock_dir, &s);
            if (err) {
                VLOG_ERR("vhost-user sock directory '%s' does not exist.",
                         vhost_sock_dir);
            }
        } else {
            vhost_sock_dir = xstrdup(ovs_rundir());
            VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid "
                     "characters '..' - using %s instead.",
                     ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
        }
        free(sock_dir_subcomponent);
    } else {
        vhost_sock_dir = sock_dir_subcomponent;
    }

    argv = grow_argv(&argv, 0, 1);
    argc = 1;
    argv[0] = xstrdup(ovs_get_program_name());
    argc_tmp = get_dpdk_args(ovs_other_config, &argv, argc);

    while (argc_tmp != argc) {
        if (!strcmp("-c", argv[argc]) || !strcmp("-l", argv[argc])) {
            auto_determine = false;
        }
        argc++;
    }
    argc = argc_tmp;

    /*
     * NOTE: This is an unsophisticated mechanism for determining the DPDK
     * lcore for the DPDK Master.
     */
    if (auto_determine) {
        int i;

        /* Get the main thread affinity */
        CPU_ZERO(&cpuset);
        err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (!err) {
            for (i = 0; i < CPU_SETSIZE; i++) {
                if (CPU_ISSET(i, &cpuset)) {
                    argv = grow_argv(&argv, argc, 2);
                    argv[argc++] = xstrdup("-c");
                    argv[argc++] = xasprintf("0x%08llX", (1ULL << i));
                    i = CPU_SETSIZE;
                }
            }
        } else {
            VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
            /* User did not set dpdk-lcore-mask and unable to get current
             * thread affinity - default to core 0x1 */
            argv = grow_argv(&argv, argc, 2);
            argv[argc++] = xstrdup("-c");
            argv[argc++] = xasprintf("0x%X", 1);
        }
    }

    argv = grow_argv(&argv, argc, 1);
    argv[argc] = NULL;

    if (VLOG_IS_INFO_ENABLED()) {
        struct ds eal_args;
        int opt;

        ds_init(&eal_args);
        ds_put_cstr(&eal_args, "EAL ARGS:");
        for (opt = 0; opt < argc; ++opt) {
            ds_put_cstr(&eal_args, " ");
            ds_put_cstr(&eal_args, argv[opt]);
        }
        VLOG_INFO("%s", ds_cstr_ro(&eal_args));
        ds_destroy(&eal_args);
    }

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    /* Set the main thread affinity back to pre rte_eal_init() value */
    if (auto_determine && !err) {
        err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (err) {
            VLOG_ERR("Thread setaffinity error %d", err);
        }
    }

    dpdk_argv = argv;
    dpdk_argc = argc;

    atexit(deferred_argv_release);

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    /* We are called from the main thread here */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

    dpdk_vhost_class_init();

#ifdef DPDK_PDUMP
    VLOG_INFO("DPDK pdump packet capture enabled");
    err = rte_pdump_init(ovs_rundir());
    if (err) {
        VLOG_INFO("Error initialising DPDK pdump");
        rte_pdump_uninit();
    } else {
        char *server_socket_path;

        server_socket_path = xasprintf("%s/%s", ovs_rundir(),
                                       "pdump_server_socket");
        fatal_signal_add_file_to_unlink(server_socket_path);
        free(server_socket_path);
    }
#endif

    /* Finally, register the dpdk classes */
    netdev_dpdk_register();
}

void
dpdk_init(const struct smap *ovs_other_config)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovs_other_config && ovsthread_once_start(&once)) {
        dpdk_init__(ovs_other_config);
        ovsthread_once_done(&once);
    }
}
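
/*
 * Usage sketch (assumed): initialization is enabled once via the database,
 * e.g.
 *
 *   ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
 *
 * followed by an ovs-vswitchd restart; dpdk_init__() bails out early when
 * dpdk-init is absent or false.
 */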

static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_ring_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        NULL,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_reconfigure,
        netdev_dpdk_vhost_rxq_recv);

static const struct netdev_class dpdk_vhost_client_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuserclient",
        netdev_dpdk_vhost_client_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_client_set_config,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_client_reconfigure,
        netdev_dpdk_vhost_rxq_recv);

static void
netdev_dpdk_register(void)
{
    dpdk_common_init();
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_ring_class);
    netdev_register_provider(&dpdk_vhost_class);
    netdev_register_provider(&dpdk_vhost_client_class);
}

void
dpdk_set_lcore_id(unsigned cpu)
{
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;
}

bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}