/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "openvswitch/shash.h"
#include "unaligned.h"

#include "rte_config.h"
#include "rte_meter.h"
#include "rte_pdump.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/* Otherwise we would need to reserve tons of extra space in the mbufs so we
 * can align the DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU. */
#define ETHER_HDR_MAX_LEN           (ETHER_HDR_LEN + ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len)                    \
                                     - ETHER_HDR_LEN - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              (MTU_TO_MAX_FRAME_LEN(mtu)      \
                                     + sizeof(struct dp_packet)    \
                                     + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN      1024
#define NETDEV_DPDK_MAX_PKT_LEN     9728
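/* Worked example of the macros above (illustrative): for the standard
 * Ethernet MTU of 1500, MTU_TO_FRAME_LEN(1500) = 1500 + 14 (ETHER_HDR_LEN)
 * + 4 (ETHER_CRC_LEN) = 1518 bytes, while MTU_TO_MAX_FRAME_LEN(1500)
 * = 1500 + 26 (ETHER_HDR_MAX_LEN, which leaves room for two VLAN tags)
 * = 1526 bytes. FRAME_LEN_TO_MTU() simply inverts MTU_TO_FRAME_LEN(). */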
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
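/* Worked example of the two assertions above (illustrative): with the
 * defaults, MAX_NB_MBUF / MIN_NB_MBUF = 16, so the halving sequence is
 * 262144, 131072, 65536, 32768, 16384; every step divides evenly, and the
 * smallest attempt (16384) is a multiple of the mempool cache size (512,
 * assuming DPDK's usual RTE_MEMPOOL_CACHE_MAX_SIZE). */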
/*
 * DPDK XSTATS Counter names definition
 */
#define XSTAT_RX_64_PACKETS              "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS       "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS      "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS      "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS     "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS    "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS     "rx_size_1523_to_max_packets"

#define XSTAT_TX_64_PACKETS              "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS       "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS      "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS      "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS     "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS    "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS     "tx_size_1523_to_max_packets"

#define XSTAT_TX_MULTICAST_PACKETS       "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS       "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS       "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS       "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS         "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS       "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS           "rx_jabber_errors"
/* Default size of Physical NIC RXQ */
#define NIC_PORT_DEFAULT_RXQ_SIZE 2048
/* Default size of Physical NIC TXQ */
#define NIC_PORT_DEFAULT_TXQ_SIZE 2048
/* Maximum size of Physical NIC Queues */
#define NIC_PORT_MAX_Q_SIZE 4096

#define OVS_VHOST_MAX_QUEUE_NUM 1024  /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */

#define VHOST_ENQ_RETRY_NUM 8
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

static int rte_eal_init_ret = ENODEV;
/* Quality of Service */

/* An instance of a QoS configuration. Always associated with a particular
 * network device.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
    rte_spinlock_t lock;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct a qos_conf object. The implementation should make
     * the appropriate calls to configure QoS according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets '*conf' to an
     * initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_construct)(const struct smap *details, struct qos_conf **conf);

    /* Destroys the data structures allocated by the implementation as part of
     * 'conf'.
     *
     * For all QoS implementations it should always be non-null. */
    void (*qos_destruct)(struct qos_conf *conf);

    /* Retrieves details of 'conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)). */
    int (*qos_get)(const struct qos_conf *conf, struct smap *details);

    /* Returns true if 'conf' is already configured according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * For all QoS implementations it should always be non-null. */
    bool (*qos_is_equal)(const struct qos_conf *conf,
                         const struct smap *details);

    /* Modify an array of rte_mbufs. The modification is specific to
     * each qos implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf **pkts,
                   int pkt_cnt);
};
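/* As an illustration of the 'qos_run' contract (hypothetical code, not part
 * of this file), a pass-through implementation would simply report every
 * mbuf as still present:
 *
 *     static int
 *     noop_qos_run(struct qos_conf *conf, struct rte_mbuf **pkts, int cnt)
 *     {
 *         return cnt;    <-- nothing dropped, array unchanged
 *     }
 *
 * A real implementation frees the mbufs it drops and compacts the array,
 * as egress_policer_ops does. */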
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};
static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
    = OVS_MUTEX_INITIALIZER;

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mp_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each transmit queue of the device. */
struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;  /* Protects the members and the NIC queue
                              * from concurrent access.  It is used only
                              * if the queue is shared among different
                              * pmd threads (see 'concurrent_txq'). */
    int map;                 /* Mapping of configured vhost-user queues
                              * to queues enabled by the guest. */
};
/* dpdk has no way to remove dpdk ring ethernet devices
 * so we have to keep them around once they've been created. */

static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;           /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    rte_spinlock_t policer_lock;
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    int max_packet_len;

    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio identifier for vhost devices */
    ovsrcu_index vid;

    /* True if vHost device is 'up' and has been reconfigured at least once */
    bool vhost_reconfigured;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    OVSRCU_TYPE(struct qos_conf *) qos_conf;

    /* The following properties cannot be changed when a device is running,
     * so we remember the request and update them next time
     * netdev_dpdk*_reconfigure() is called */
    int requested_mtu;
    int requested_n_txq;
    int requested_n_rxq;
    int requested_rxq_size;
    int requested_txq_size;

    /* Number of rx/tx descriptors for physical devices */
    int rxq_size;
    int txq_size;

    /* Socket ID detected when vHost device is brought up */
    int requested_socket_id;

    /* Denotes whether vHost port is client/server mode */
    uint64_t vhost_driver_flags;

    /* Ingress Policer */
    OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
    uint32_t policer_rate;
    uint32_t policer_burst;

    /* DPDK-ETH Flow control */
    struct rte_eth_fc_conf fc_conf;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                     NETDEV_DPDK_MBUF_ALIGN);
}
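/* For example (illustrative): with mtu = 1500, MTU_TO_MAX_FRAME_LEN(1500)
 * is 1526 bytes; adding RTE_PKTMBUF_HEADROOM (128 bytes in a default DPDK
 * build) gives 1654, which ROUND_UP() takes to the next multiple of
 * NETDEV_DPDK_MBUF_ALIGN, i.e. 2048. */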
/* Allocates an area of 'sz' bytes from DPDK. The memory is zero'ed.
 *
 * Unlike xmalloc(), this function can return NULL on failure. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    return rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_p,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *pkt = _p;

    rte_pktmbuf_init(mp, opaque_arg, _p, i);

    dp_packet_init_dpdk((struct dp_packet *) pkt, pkt->buf_len);
}
static struct dpdk_mp *
dpdk_mp_create(int socket_id, int mtu)
{
    struct rte_pktmbuf_pool_private mbp_priv;
    struct dpdk_mp *dmp;
    unsigned mp_size;
    char *mp_name;

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    if (!dmp) {
        return NULL;
    }
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);
    /* XXX: this is a really rough method of provisioning memory.
     * It's impossible to determine what the exact memory requirements are
     * when the number of ports and rxqs that utilize a particular mempool can
     * change dynamically at runtime. For now, use this rough heuristic. */
    if (mtu >= ETHER_MTU) {
        mp_size = MAX_NB_MBUF;
    } else {
        mp_size = MIN_NB_MBUF;
    }

    do {
        mp_name = xasprintf("ovs_mp_%d_%d_%u", dmp->mtu, dmp->socket_id,
                            mp_size);

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
        if (dmp->mp) {
            VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
                     mp_name, mp_size);
        }
        free(mp_name);
    } while (rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp) {
        return dmp;
    }
    rte_free(dmp);
    return NULL;
}
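/* Illustrative walk-through of the loop above: a first attempt on socket 0
 * for MTU 1500 asks for MAX_NB_MBUF (262144) mbufs under the name
 * "ovs_mp_1500_0_262144"; on ENOMEM the next attempt is
 * "ovs_mp_1500_0_131072", and so on down to MIN_NB_MBUF (16384). */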
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu)
{
    struct dpdk_mp *dmp;

    ovs_mutex_lock(&dpdk_mp_mutex);
    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            goto out;
        }
    }

    dmp = dpdk_mp_create(socket_id, mtu);
    if (dmp) {
        ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
    }

out:
    ovs_mutex_unlock(&dpdk_mp_mutex);

    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    ovs_mutex_lock(&dpdk_mp_mutex);
    ovs_assert(dmp->refcount);

    if (!--dmp->refcount) {
        ovs_list_remove(&dmp->list_node);
        rte_mempool_free(dmp->mp);
        rte_free(dmp);
    }
    ovs_mutex_unlock(&dpdk_mp_mutex);
}
/* Tries to allocate new mempool on requested_socket_id with
 * mbuf size corresponding to requested_mtu.
 * On success new configuration will be applied.
 * On error, device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
    struct dpdk_mp *mp;

    mp = dpdk_mp_get(dev->requested_socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!mp) {
        VLOG_ERR("Insufficient memory to create memory pool for netdev "
                 "%s, with MTU %d on socket %d\n",
                 dev->up.name, dev->requested_mtu, dev->requested_socket_id);
        return ENOMEM;
    } else {
        dpdk_mp_put(dev->dpdk_mp);
        dev->dpdk_mp = mp;
        dev->mtu = dev->requested_mtu;
        dev->socket_id = dev->requested_socket_id;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    }

    return 0;
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                         ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;
    struct rte_eth_conf conf = port_conf;

    if (dev->mtu > ETHER_MTU) {
        conf.rxmode.jumbo_frame = 1;
        conf.rxmode.max_rx_pkt_len = dev->max_packet_len;
    } else {
        conf.rxmode.jumbo_frame = 0;
        conf.rxmode.max_rx_pkt_len = 0;
    }

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request fewer queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
        if (diag) {
            VLOG_WARN("Interface %s eth_dev setup error %s\n",
                      dev->up.name, rte_strerror(-diag));
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, dev->txq_size,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues */
            n_txq--;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, dev->rxq_size,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues */
            n_rxq--;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag ? -diag : ENODEV;
}
static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
    if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
        VLOG_WARN("Failed to enable flow control on device %d", dev->port_id);
    }
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    /* Get the Flow control configuration for DPDK-ETH */
    diag = rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
    if (diag) {
        VLOG_DBG("cannot get flow control parameters on port=%d, err=%d",
                 dev->port_id, diag);
    }

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}
static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    if (!rte_eal_init_ret) { /* Only after successful initialization */
        dev = dpdk_rte_mzalloc(sizeof *dev);
        if (dev) {
            return &dev->up;
        }
    }
    return NULL;
}
static struct dpdk_tx_queue *
netdev_dpdk_alloc_txq(unsigned int n_txqs)
{
    struct dpdk_tx_queue *txqs;
    unsigned i;

    txqs = dpdk_rte_mzalloc(n_txqs * sizeof *txqs);
    if (txqs) {
        for (i = 0; i < n_txqs; i++) {
            /* Initialize map for vhost devices. */
            txqs[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
            rte_spinlock_init(&txqs[i].tx_lock);
        }
    }

    return txqs;
}
static int
netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int sid;
    int err = 0;

    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;
    dev->requested_mtu = dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    ovsrcu_index_init(&dev->vid, -1);
    dev->vhost_reconfigured = false;

    err = netdev_dpdk_mempool_configure(dev);
    if (err) {
        goto unlock;
    }

    ovsrcu_init(&dev->qos_conf, NULL);

    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_rxq = NR_QUEUE;
    netdev->n_txq = NR_QUEUE;
    dev->requested_n_rxq = netdev->n_rxq;
    dev->requested_n_txq = netdev->n_txq;
    dev->rxq_size = NIC_PORT_DEFAULT_RXQ_SIZE;
    dev->txq_size = NIC_PORT_DEFAULT_TXQ_SIZE;
    dev->requested_rxq_size = dev->rxq_size;
    dev->requested_txq_size = dev->txq_size;

    /* Initialize the flow control to NULL */
    memset(&dev->fc_conf, 0, sizeof dev->fc_conf);
    if (type == DPDK_DEV_ETH) {
        err = dpdk_eth_dev_init(dev);
        if (err) {
            goto unlock;
        }
        dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq);
    } else {
        dev->tx_q = netdev_dpdk_alloc_txq(OVS_VHOST_MAX_QUEUE_NUM);
        /* Enable DPDK_DEV_VHOST device and set promiscuous mode flag. */
        dev->flags = NETDEV_UP | NETDEV_PROMISC;
    }

    if (!dev->tx_q) {
        err = ENOMEM;
        goto unlock;
    }

    ovs_list_push_back(&dpdk_list, &dev->list_node);

unlock:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
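/* For instance (illustrative), dpdk_dev_parse_name("dpdk7", "dpdk",
 * &port_no) succeeds and sets port_no to 7, while "dpdk-7" (sign not
 * allowed) or "mydpdk7" (wrong prefix) both return ENODEV. */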
static int
netdev_dpdk_vhost_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(dev->vhost_id, sizeof dev->vhost_id, "%s/%s",
             vhost_sock_dir, name);

    dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;
    err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 dev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
    }
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_client_construct(struct netdev *netdev)
{
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_eth_dev_stop(dev->port_id);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
}
/* rte_vhost_driver_unregister() can call back destroy_device(), which will
 * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'. To avoid a
 * deadlock, none of the mutexes must be held while calling this function. */
static int
dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
                             char *vhost_id)
    OVS_EXCLUDED(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    return rte_vhost_driver_unregister(vhost_id);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    char *vhost_id;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_vid(dev) >= 0
        && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.", dev->vhost_id);
    }

    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    vhost_id = xstrdup(dev->vhost_id);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    if (dpdk_vhost_driver_unregister(dev, vhost_id)) {
        VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n",
                 netdev->name, vhost_id);
    } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        /* OVS server mode - remove this socket from list for deletion */
        fatal_signal_remove_file_to_unlink(vhost_id);
    }
    free(vhost_id);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "requested_rxq_descriptors", "%d",
                    dev->requested_rxq_size);
    smap_add_format(args, "configured_rxq_descriptors", "%d", dev->rxq_size);
    smap_add_format(args, "requested_txq_descriptors", "%d",
                    dev->requested_txq_size);
    smap_add_format(args, "configured_txq_descriptors", "%d", dev->txq_size);
    smap_add_format(args, "mtu", "%d", dev->mtu);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args)
    OVS_REQUIRES(dev->mutex)
{
    int new_n_rxq;

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", dev->requested_n_rxq), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(&dev->up);
    }
}
static void
dpdk_process_queue_size(struct netdev *netdev, const struct smap *args,
                        const char *flag, int default_size, int *new_size)
{
    int queue_size = smap_get_int(args, flag, default_size);

    if (queue_size <= 0 || queue_size > NIC_PORT_MAX_Q_SIZE
            || !is_pow2(queue_size)) {
        queue_size = default_size;
    }

    if (queue_size != *new_size) {
        *new_size = queue_size;
        netdev_request_reconfigure(netdev);
    }
}
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    bool rx_fc_en, tx_fc_en, autoneg;
    enum rte_eth_fc_mode fc_mode;
    static const enum rte_eth_fc_mode fc_mode_set[2][2] = {
        {RTE_FC_NONE,     RTE_FC_TX_PAUSE},
        {RTE_FC_RX_PAUSE, RTE_FC_FULL    }
    };

    ovs_mutex_lock(&dev->mutex);

    dpdk_set_rxq_config(dev, args);

    dpdk_process_queue_size(netdev, args, "n_rxq_desc",
                            NIC_PORT_DEFAULT_RXQ_SIZE,
                            &dev->requested_rxq_size);
    dpdk_process_queue_size(netdev, args, "n_txq_desc",
                            NIC_PORT_DEFAULT_TXQ_SIZE,
                            &dev->requested_txq_size);

    rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
    tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
    autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);

    fc_mode = fc_mode_set[tx_fc_en][rx_fc_en];
    if (dev->fc_conf.mode != fc_mode || autoneg != dev->fc_conf.autoneg) {
        dev->fc_conf.mode = fc_mode;
        dev->fc_conf.autoneg = autoneg;
        dpdk_eth_flow_ctrl_setup(dev);
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
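/* These knobs arrive through the Interface table's 'options' column, e.g.
 * (illustrative, using standard OVSDB tooling):
 *
 *     ovs-vsctl set Interface dpdk0 options:n_rxq_desc=1024 \
 *         options:rx-flow-ctrl=true options:flow-ctrl-autoneg=true
 */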
static int
netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_set_rxq_config(dev, args);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_client_set_config(struct netdev *netdev,
                                    const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *path;

    ovs_mutex_lock(&dev->mutex);
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        path = smap_get(args, "vhost-server-path");
        if (path && strcmp(path, dev->vhost_id)) {
            strcpy(dev->vhost_id, path);
            netdev_request_reconfigure(netdev);
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}
/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    if (rx) {
        return &rx->up;
    }

    return NULL;
}
static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
/* Tries to transmit 'pkts' to txq 'qid' of device 'dev'. Takes ownership of
 * 'pkts', even in case of failure.
 *
 * Returns the number of packets that weren't transmitted. */
static inline int
netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
                         struct rte_mbuf **pkts, int cnt)
{
    uint32_t nb_tx = 0;

    while (nb_tx != cnt) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, cnt - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != cnt)) {
        /* Free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < cnt; i++) {
            rte_pktmbuf_free(pkts[i]);
        }
    }

    return cnt - nb_tx;
}
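/* For example (illustrative): if 32 mbufs are passed in and the NIC ring
 * accepts 20 on the first rte_eth_tx_burst() call and 0 on the second, the
 * loop exits, the remaining 12 mbufs are freed, and 12 is returned so the
 * caller can account for them as drops. */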
static inline bool
netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
                               struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}
static int
netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
                        struct rte_mbuf **pkts, int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet */
        if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}
static int
ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
                    int pkt_cnt)
{
    int cnt = 0;

    rte_spinlock_lock(&policer->policer_lock);
    cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts, pkt_cnt);
    rte_spinlock_unlock(&policer->policer_lock);

    return cnt;
}
static bool
is_vhost_running(struct netdev_dpdk *dev)
{
    return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
}
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
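/* E.g. (illustrative trace of the branches above): a 64-byte frame lands in
 * rx_1_to_64_packets, a 1518-byte frame in rx_1024_to_1522_packets, and
 * anything of 1523 bytes or more in rx_1523_to_max_packets. */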
static void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count,
                                     int dropped)
{
    int i;
    unsigned int packet_size;
    struct dp_packet *packet;

    stats->rx_packets += count;
    stats->rx_dropped += dropped;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }
}
/*
 * The receive path for the vhost port is the TX path out of the guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet_batch *batch)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    int qid = rxq->queue_id;
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t nb_rx = 0;
    uint16_t dropped = 0;

    if (OVS_UNLIKELY(!is_vhost_running(dev)
                     || !(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(netdev_dpdk_get_vid(dev),
                                    qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) batch->packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
                                         nb_rx, dropped);
    rte_spinlock_unlock(&dev->stats_lock);

    batch->count = (int) nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    int nb_rx;
    int dropped = 0;

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) batch->packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    /* Update stats to reflect dropped packets */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    batch->count = nb_rx;

    return 0;
}
static inline int
netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                    int cnt)
{
    struct qos_conf *qos_conf = ovsrcu_get(struct qos_conf *, &dev->qos_conf);

    if (qos_conf) {
        rte_spinlock_lock(&qos_conf->lock);
        cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt);
        rte_spinlock_unlock(&qos_conf->lock);
    }

    return cnt;
}
static int
netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                              int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt;

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        if (OVS_UNLIKELY(pkt->pkt_len > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32
                         " max_packet_len %d",
                         dev->up.name, pkt->pkt_len, dev->max_packet_len);
            rte_pktmbuf_free(pkt);
            continue;
        }

        if (OVS_UNLIKELY(i != cnt)) {
            pkts[cnt] = pkt;
        }
        cnt++;
    }

    return cnt;
}
static void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int dropped = 0;
    int i, retries = 0;

    qid = dev->tx_q[qid % netdev->n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(dev) || qid < 0
                     || !(dev->flags & NETDEV_UP))) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    /* Check whether QoS has been configured for the netdev. */
    cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt);
    dropped = total_pkts - cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(netdev_dpdk_get_vid(dev),
                                          vhost_qid, cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible retry.*/
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            /* No packets sent - do not retry.*/
            break;
        }
    } while (cnt && (retries++ <= VHOST_ENQ_RETRY_NUM));

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
                                         cnt + dropped);
    rte_spinlock_unlock(&dev->stats_lock);

out:
    for (i = 0; i < total_pkts - dropped; i++) {
        dp_packet_delete(pkts[i]);
    }
}
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = batch->count;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    dp_packet_batch_apply_cutlen(batch);

    for (i = 0; i < batch->count; i++) {
        int size = dp_packet_size(batch->packets[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);
            dropped++;
            continue;
        }

        pkts[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!pkts[newcnt]) {
            dropped += batch->count - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(pkts[newcnt], void *),
               dp_packet_data(batch->packets[i]), size);

        rte_pktmbuf_data_len(pkts[newcnt]) = size;
        rte_pktmbuf_pkt_len(pkts[newcnt]) = size;

        newcnt++;
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) pkts,
                                 newcnt);
    } else {
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run(dev, pkts, newcnt);

        dropped += qos_pkts - newcnt;
        dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, newcnt);
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                       struct dp_packet_batch *batch,
                       bool may_steal, bool concurrent_txq OVS_UNUSED)
{

    if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        dp_packet_batch_apply_cutlen(batch);
        __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet_batch *batch, bool may_steal,
                   bool concurrent_txq)
{
    if (OVS_UNLIKELY(concurrent_txq)) {
        qid = qid % dev->up.n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     batch->packets[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        int dropped;
        int cnt = batch->count;
        struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;

        dp_packet_batch_apply_cutlen(batch);

        cnt = netdev_dpdk_filter_packet_len(dev, pkts, cnt);
        cnt = netdev_dpdk_qos_run(dev, pkts, cnt);
        dropped = batch->count - cnt;

        dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, cnt);

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(concurrent_txq)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet_batch *batch, bool may_steal,
                     bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    if (MTU_TO_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
        || mtu < ETHER_MIN_MTU) {
        VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu);
        return EINVAL;
    }

    ovs_mutex_lock(&dev->mutex);
    if (dev->requested_mtu != mtu) {
        dev->requested_mtu = mtu;
        netdev_request_reconfigure(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstat *xstats,
                           const struct rte_eth_xstat_name *names,
                           const unsigned int size)
{
    for (unsigned int i = 0; i < size; i++) {
        if (strcmp(XSTAT_RX_64_PACKETS, names[i].name) == 0) {
            stats->rx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->rx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->rx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->rx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->rx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->rx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->rx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_64_PACKETS, names[i].name) == 0) {
            stats->tx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->tx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->tx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->tx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->tx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->tx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->tx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, names[i].name) == 0) {
            stats->tx_multicast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->rx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->tx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, names[i].name) == 0) {
            stats->rx_undersized_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, names[i].name) == 0) {
            stats->rx_fragmented_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, names[i].name) == 0) {
            stats->rx_jabber_errors = xstats[i].value;
        }
    }
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstat *rte_xstats = NULL;
    struct rte_eth_xstat_name *rte_xstats_names = NULL;
    int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    /* Get length of statistics */
    rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
    if (rte_xstats_len < 0) {
        VLOG_WARN("Cannot get XSTATS values for port: %i", dev->port_id);
        goto out;
    }

    /* Reserve memory for xstats names and values */
    rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
    rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);

    /* Retrieve xstats names */
    rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
                                                  rte_xstats_names,
                                                  rte_xstats_len);
    if (rte_xstats_new_len != rte_xstats_len) {
        VLOG_WARN("Cannot get XSTATS names for port: %i.", dev->port_id);
        goto out;
    }

    /* Retrieve xstats values */
    memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
    rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                        rte_xstats_len);
    if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
        netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
                                   rte_xstats_ret);
    } else {
        VLOG_WARN("Cannot get XSTATS values for port: %i.", dev->port_id);
    }

out:
    free(rte_xstats);
    free(rte_xstats_names);

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
    }

    return 0;
}
static struct ingress_policer *
netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
{
    struct ingress_policer *policer = NULL;
    uint64_t rate_bytes;
    uint64_t burst_bytes;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    rte_spinlock_init(&policer->policer_lock);

    /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
    rate_bytes = rate * 1000 / 8;
    burst_bytes = burst * 1000 / 8;

    policer->app_srtcm_params.cir = rate_bytes;
    policer->app_srtcm_params.cbs = burst_bytes;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->in_policer,
                                 &policer->app_srtcm_params);
    if (err) {
        VLOG_ERR("Could not create rte meter for ingress policer");
        free(policer);
        return NULL;
    }

    return policer;
}
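/* For example (illustrative): a rate of 10000 kbits/s converts to
 * 10000 * 1000 / 8 = 1,250,000 bytes/s for the srTCM committed information
 * rate (CIR), and a burst of 8000 kbits to 1,000,000 bytes for the
 * committed burst size (CBS). */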
static int
netdev_dpdk_set_policing(struct netdev *netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;

    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value.
     */
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
                     : policer_burst);

    ovs_mutex_lock(&dev->mutex);

    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);

    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    }

    /* Destroy any existing ingress policer for the device if one exists */
    if (policer) {
        ovsrcu_postpone(free, policer);
    }

    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    } else {
        policer = NULL;
    }
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    } else {
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
         * update. */

        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(dev)) {
            netdev_change_seq_changed(&dev->up);

            /* Clear statistics if device is getting up. */
            if (NETDEV_UP & on) {
                rte_spinlock_lock(&dev->stats_lock);
                memset(&dev->stats, 0, sizeof dev->stats);
                rte_spinlock_unlock(&dev->stats_lock);
            }
        }
    }

    return 0;
}

static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}

static int
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}

static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(int vid)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < rte_vhost_get_queue_num(vid); i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_TXQ, 0);
    }
}

/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->up.n_txq;

    enabled_queues = xcalloc(total_txqs, sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    }

    free(enabled_queues);
}

/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int newnode = 0;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            uint32_t qp_num = rte_vhost_get_queue_num(vid);

            /* Get NUMA information */
            newnode = rte_vhost_get_numa_node(vid);
            if (newnode == -1) {
                VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
                          ifname);
                newnode = dev->socket_id;
            }

            if (dev->requested_n_txq != qp_num
                || dev->requested_n_rxq != qp_num
                || dev->requested_socket_id != newnode) {
                dev->requested_socket_id = newnode;
                dev->requested_n_rxq = qp_num;
                dev->requested_n_txq = qp_num;
                netdev_request_reconfigure(&dev->up);
            } else {
                /* Reconfiguration not required. */
                dev->vhost_reconfigured = true;
            }

            ovsrcu_index_set(&dev->vid, vid);
            exists = true;

            /* Disable notifications. */
            set_irq_status(vid);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' has been added on numa node %i",
              ifname, newnode);

    return 0;
}

/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->up.n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}

/*
 * Remove a virtio-net device from the specific vhost port. Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_vid(dev) == vid) {

            ovs_mutex_lock(&dev->mutex);
            dev->vhost_reconfigured = false;
            ovsrcu_index_set(&dev->vid, -1);
            netdev_dpdk_txq_map_clear(dev);

            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            exists = true;
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' has been removed", ifname);
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
    }
}

static int
vring_state_changed(int vid, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            if (enable) {
                dev->tx_q[qid].map = qid;
            } else {
                dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
            }
            netdev_dpdk_remap_txqs(dev);
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' "
                  "changed to \'%s\'", queue_id, qid, ifname,
                  (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
        return -1;
    }

    return 0;
}

int
netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
{
    return ovsrcu_index_get(&dev->vid);
}

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
}

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};

static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the vhost thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}

static void
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
                            | 1ULL << VIRTIO_NET_F_HOST_TSO6
                            | 1ULL << VIRTIO_NET_F_CSUM);

    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
}

static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);
}

/* Client Rings */

static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char *ring_name;
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (!ivshmem) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    ring_name = xasprintf("%s_tx", dev_name);

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    free(ring_name);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    ring_name = xasprintf("%s_rx", dev_name);

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    free(ring_name);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}

static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            /* Really all that is needed */
            *eth_port_id = ivshmem->eth_port_id;
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}

static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
                      struct dp_packet_batch *batch, bool may_steal,
                      bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear. This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < batch->count; i++) {
        dp_packet_rss_invalidate(batch->packets[i]);
    }

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}

static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

/* QoS Functions */

/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
    rte_spinlock_init(&conf->lock);
}

/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations qos_name to name. Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match is found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }
    return NULL;
}

static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }

    return 0;
}

static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf) {
        *typep = qos_conf->ops->qos_name;
        error = (qos_conf->ops->qos_get
                 ? qos_conf->ops->qos_get(qos_conf, details) : 0);
    } else {
        /* No QoS configuration set, return an empty string */
        *typep = "";
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}

static int
netdev_dpdk_set_qos(struct netdev *netdev, const char *type,
                    const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    struct qos_conf *qos_conf, *new_qos_conf = NULL;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);

    new_ops = qos_lookup_name(type);

    if (!new_ops || !new_ops->qos_construct) {
        new_qos_conf = NULL;
        if (type && type[0]) {
            error = EOPNOTSUPP;
        }
    } else if (qos_conf->ops == new_ops
               && qos_conf->ops->qos_is_equal(qos_conf, details)) {
        new_qos_conf = qos_conf;
    } else {
        error = new_ops->qos_construct(details, &new_qos_conf);
    }

    if (error) {
        VLOG_ERR("Failed to set QoS type %s on port %s: %s",
                 type, netdev->name, rte_strerror(error));
    }

    if (new_qos_conf != qos_conf) {
        ovsrcu_set(&dev->qos_conf, new_qos_conf);
        if (qos_conf) {
            ovsrcu_postpone(qos_conf->ops->qos_destruct, qos_conf);
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}

/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};

static void
egress_policer_details_to_param(const struct smap *details,
                                struct rte_meter_srtcm_params *params)
{
    memset(params, 0, sizeof *params);
    params->cir = smap_get_ullong(details, "cir", 0);
    params->cbs = smap_get_ullong(details, "cbs", 0);
    params->ebs = 0;
}

static int
egress_policer_qos_construct(const struct smap *details,
                             struct qos_conf **conf)
{
    struct egress_policer *policer;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    egress_policer_details_to_param(details, &policer->app_srtcm_params);
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    if (!err) {
        *conf = &policer->qos_conf;
    } else {
        free(policer);
        *conf = NULL;
        err = -err;
    }

    return err;
}

static void
egress_policer_qos_destruct(struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}

static int
egress_policer_qos_get(const struct qos_conf *conf, struct smap *details)
{
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);

    smap_add_format(details, "cir", "%"PRIu64, policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%"PRIu64, policer->app_srtcm_params.cbs);

    return 0;
}

static bool
egress_policer_qos_is_equal(const struct qos_conf *conf,
                            const struct smap *details)
{
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);
    struct rte_meter_srtcm_params params;

    egress_policer_details_to_param(details, &params);

    return !memcmp(&params, &policer->app_srtcm_params, sizeof params);
}

static int
egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt)
{
    int cnt = 0;
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);

    cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);

    return cnt;
}

static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_is_equal,
    egress_policer_run
};

static int
netdev_dpdk_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dev->mutex);

    if (netdev->n_txq == dev->requested_n_txq
        && netdev->n_rxq == dev->requested_n_rxq
        && dev->mtu == dev->requested_mtu
        && dev->rxq_size == dev->requested_rxq_size
        && dev->txq_size == dev->requested_txq_size) {
        /* Reconfiguration is unnecessary */

        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    if (dev->mtu != dev->requested_mtu) {
        netdev_dpdk_mempool_configure(dev);
    }

    netdev->n_txq = dev->requested_n_txq;
    netdev->n_rxq = dev->requested_n_rxq;

    dev->rxq_size = dev->requested_rxq_size;
    dev->txq_size = dev->requested_txq_size;

    rte_free(dev->tx_q);
    err = dpdk_eth_dev_init(dev);
    dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq);
    if (!dev->tx_q) {
        err = ENOMEM;
    }

    netdev_change_seq_changed(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}

static void
dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    dev->up.n_txq = dev->requested_n_txq;
    dev->up.n_rxq = dev->requested_n_rxq;

    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    netdev_dpdk_remap_txqs(dev);

    if (dev->requested_socket_id != dev->socket_id
        || dev->requested_mtu != dev->mtu) {
        if (!netdev_dpdk_mempool_configure(dev)) {
            netdev_change_seq_changed(&dev->up);
        }
    }

    if (netdev_dpdk_get_vid(dev) >= 0) {
        dev->vhost_reconfigured = true;
    }
}

static int
netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_vhost_reconfigure_helper(dev);
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}

static int
netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dev->mutex);

    dpdk_vhost_reconfigure_helper(dev);

    /* Configure vHost client mode if requested and if the following criteria
     * are met:
     *  1. Device hasn't been registered yet.
     *  2. A path has been specified.
     */
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)
            && strlen(dev->vhost_id)) {
        /* Register client-mode device */
        err = rte_vhost_driver_register(dev->vhost_id,
                                        RTE_VHOST_USER_CLIENT);
        if (err) {
            VLOG_ERR("vhost-user device setup failure for device %s\n",
                     dev->vhost_id);
        } else {
            /* Configuration successful */
            dev->vhost_driver_flags |= RTE_VHOST_USER_CLIENT;
            VLOG_INFO("vHost User device '%s' created in 'client' mode, "
                      "using client socket '%s'",
                      dev->up.name, dev->vhost_id);
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return err;
}

#define NETDEV_DPDK_CLASS(NAME, CONSTRUCT, DESTRUCT,          \
                          SET_CONFIG, SET_TX_MULTIQ, SEND,    \
                          GET_CARRIER, GET_STATS,             \
                          GET_FEATURES, GET_STATUS,           \
                          RECONFIGURE, RXQ_RECV)              \
{                                                             \
    NAME,                                                     \
    true,                       /* is_pmd */                  \
    NULL,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    SET_CONFIG,                                               \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    SET_TX_MULTIQ,                                            \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    netdev_dpdk_set_policing,                                 \
    netdev_dpdk_get_qos_types,                                \
    NULL,                       /* get_qos_capabilities */    \
    netdev_dpdk_get_qos,                                      \
    netdev_dpdk_set_qos,                                      \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_addr_list */           \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
    RECONFIGURE,                                              \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}

static int
process_vhost_flags(char *flag, char *default_val, int size,
                    const struct smap *ovs_other_config,
                    char **new_val)
{
    const char *val;
    int changed = 0;

    val = smap_get(ovs_other_config, flag);

    /* Process the vhost-sock-dir flag if it is provided, otherwise resort to
     * the default value.
     */
    if (val && (strlen(val) <= size)) {
        changed = 1;
        *new_val = xstrdup(val);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}

static char **
grow_argv(char ***argv, size_t cur_siz, size_t grow_by)
{
    return xrealloc(*argv, sizeof(char *) * (cur_siz + grow_by));
}

static void
dpdk_option_extend(char ***argv, int argc, const char *option,
                   const char *value)
{
    char **newargv = grow_argv(argv, argc, 2);
    *argv = newargv;
    newargv[argc] = xstrdup(option);
    newargv[argc+1] = xstrdup(value);
}

static char **
move_argv(char ***argv, size_t cur_size, char **src_argv, size_t src_argc)
{
    char **newargv = grow_argv(argv, cur_size, src_argc);
    while (src_argc--) {
        newargv[cur_size+src_argc] = src_argv[src_argc];
        src_argv[src_argc] = NULL;
    }

    return newargv;
}

static int
extra_dpdk_args(const char *ovs_extra_config, char ***argv, int argc)
{
    int ret = argc;
    char *release_tok = xstrdup(ovs_extra_config);
    char *tok, *endptr = NULL;

    for (tok = strtok_r(release_tok, " ", &endptr); tok != NULL;
         tok = strtok_r(NULL, " ", &endptr)) {
        char **newarg = grow_argv(argv, ret, 1);
        *argv = newarg;
        newarg[ret++] = xstrdup(tok);
    }

    free(release_tok);
    return ret;
}

static bool
argv_contains(char **argv_haystack, const size_t argc_haystack,
              const char *needle)
{
    for (size_t i = 0; i < argc_haystack; ++i) {
        if (!strcmp(argv_haystack[i], needle)) {
            return true;
        }
    }
    return false;
}

static int
construct_dpdk_options(const struct smap *ovs_other_config,
                       char ***argv, const int initial_size,
                       char **extra_args, const size_t extra_argc)
{
    struct dpdk_options_map {
        const char *ovs_configuration;
        const char *dpdk_option;
        bool default_enabled;
        const char *default_value;
    } opts[] = {
        {"dpdk-lcore-mask", "-c", false, NULL},
        {"dpdk-hugepage-dir", "--huge-dir", false, NULL},
    };

    int i, ret = initial_size;

    /* First, construct from the flat options (non-mutex). */
    for (i = 0; i < ARRAY_SIZE(opts); ++i) {
        const char *lookup = smap_get(ovs_other_config,
                                      opts[i].ovs_configuration);
        if (!lookup && opts[i].default_enabled) {
            lookup = opts[i].default_value;
        }

        if (lookup) {
            if (!argv_contains(extra_args, extra_argc, opts[i].dpdk_option)) {
                dpdk_option_extend(argv, ret, opts[i].dpdk_option, lookup);
                ret += 2;
            } else {
                VLOG_WARN("Ignoring database defined option '%s' due to "
                          "dpdk_extras config", opts[i].dpdk_option);
            }
        }
    }

    return ret;
}

#define MAX_DPDK_EXCL_OPTS 10

static int
construct_dpdk_mutex_options(const struct smap *ovs_other_config,
                             char ***argv, const int initial_size,
                             char **extra_args, const size_t extra_argc)
{
    struct dpdk_exclusive_options_map {
        const char *category;
        const char *ovs_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *eal_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *default_value;
        int default_option;
    } excl_opts[] = {
        {"memory type",
         {"dpdk-alloc-mem", "dpdk-socket-mem", NULL,},
         {"-m",             "--socket-mem",    NULL,},
         "1024,0", 1
        },
    };

    int i, ret = initial_size;
    for (i = 0; i < ARRAY_SIZE(excl_opts); ++i) {
        int found_opts = 0, scan, found_pos = -1;
        const char *found_value;
        struct dpdk_exclusive_options_map *popt = &excl_opts[i];

        for (scan = 0; scan < MAX_DPDK_EXCL_OPTS
                 && popt->ovs_dpdk_options[scan]; ++scan) {
            const char *lookup = smap_get(ovs_other_config,
                                          popt->ovs_dpdk_options[scan]);
            if (lookup && strlen(lookup)) {
                found_opts++;
                found_pos = scan;
                found_value = lookup;
            }
        }

        if (!found_opts) {
            if (popt->default_option) {
                found_pos = popt->default_option;
                found_value = popt->default_value;
            } else {
                continue;
            }
        }

        if (found_opts > 1) {
            VLOG_ERR("Multiple defined options for %s. Please check your"
                     " database settings and reconfigure if necessary.",
                     popt->category);
        }

        if (!argv_contains(extra_args, extra_argc,
                           popt->eal_dpdk_options[found_pos])) {
            dpdk_option_extend(argv, ret, popt->eal_dpdk_options[found_pos],
                               found_value);
            ret += 2;
        } else {
            VLOG_WARN("Ignoring database defined option '%s' due to "
                      "dpdk_extras config", popt->eal_dpdk_options[found_pos]);
        }
    }

    return ret;
}

static int
get_dpdk_args(const struct smap *ovs_other_config, char ***argv,
              int argc)
{
    const char *extra_configuration;
    char **extra_args = NULL;
    int i;
    size_t extra_argc = 0;

    extra_configuration = smap_get(ovs_other_config, "dpdk-extra");
    if (extra_configuration) {
        extra_argc = extra_dpdk_args(extra_configuration, &extra_args, 0);
    }

    i = construct_dpdk_options(ovs_other_config, argv, argc, extra_args,
                               extra_argc);
    i = construct_dpdk_mutex_options(ovs_other_config, argv, i, extra_args,
                                     extra_argc);

    if (extra_configuration) {
        *argv = move_argv(argv, i, extra_args, extra_argc);
    }

    return i + extra_argc;
}

static char **dpdk_argv;
static int dpdk_argc;

static void
deferred_argv_release(void)
{
    int result;
    for (result = 0; result < dpdk_argc; ++result) {
        free(dpdk_argv[result]);
    }

    free(dpdk_argv);
}

static void
dpdk_init__(const struct smap *ovs_other_config)
{
    char **argv = NULL;
    int result;
    int argc, argc_tmp;
    bool auto_determine = true;
    int err = 0;
    int i;
    cpu_set_t cpuset;
    char *sock_dir_subcomponent;

    if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
        VLOG_INFO("DPDK Disabled - to change this requires a restart.\n");
        return;
    }

    VLOG_INFO("DPDK Enabled, initializing");
    if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
                            NAME_MAX, ovs_other_config,
                            &sock_dir_subcomponent)) {
        struct stat s;

        if (!strstr(sock_dir_subcomponent, "..")) {
            vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
                                       sock_dir_subcomponent);

            err = stat(vhost_sock_dir, &s);
            if (err) {
                VLOG_ERR("vhost-user sock directory '%s' does not exist.",
                         vhost_sock_dir);
            }
        } else {
            vhost_sock_dir = xstrdup(ovs_rundir());
            VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid "
                     "characters '..' - using %s instead.",
                     ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
        }
        free(sock_dir_subcomponent);
    } else {
        vhost_sock_dir = sock_dir_subcomponent;
    }

    argv = grow_argv(&argv, 0, 1);
    argc = 1;
    argv[0] = xstrdup(ovs_get_program_name());
    argc_tmp = get_dpdk_args(ovs_other_config, &argv, argc);

    while (argc_tmp != argc) {
        if (!strcmp("-c", argv[argc]) || !strcmp("-l", argv[argc])) {
            auto_determine = false;
        }
        argc++;
    }
    argc = argc_tmp;

    /*
     * NOTE: This is an unsophisticated mechanism for determining the DPDK
     * lcore for the DPDK Master.
     */
    if (auto_determine) {
        /* Get the main thread affinity */
        CPU_ZERO(&cpuset);
        err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (!err) {
            for (i = 0; i < CPU_SETSIZE; i++) {
                if (CPU_ISSET(i, &cpuset)) {
                    argv = grow_argv(&argv, argc, 2);
                    argv[argc++] = xstrdup("-c");
                    argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
                    i = CPU_SETSIZE;
                }
            }
        } else {
            VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
            /* User did not set dpdk-lcore-mask and unable to get current
             * thread affinity - default to core 0x1 */
            argv = grow_argv(&argv, argc, 2);
            argv[argc++] = xstrdup("-c");
            argv[argc++] = xasprintf("0x%X", 1);
        }
    }

    argv = grow_argv(&argv, argc, 1);
    argv[argc] = NULL;

    if (VLOG_IS_INFO_ENABLED()) {
        struct ds eal_args;
        int opt;

        ds_init(&eal_args);
        ds_put_cstr(&eal_args, "EAL ARGS:");
        for (opt = 0; opt < argc; ++opt) {
            ds_put_cstr(&eal_args, " ");
            ds_put_cstr(&eal_args, argv[opt]);
        }
        VLOG_INFO("%s", ds_cstr_ro(&eal_args));
        ds_destroy(&eal_args);
    }

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    /* Set the main thread affinity back to pre rte_eal_init() value */
    if (auto_determine && !err) {
        err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (err) {
            VLOG_ERR("Thread setaffinity error %d", err);
        }
    }

    dpdk_argv = argv;
    dpdk_argc = argc;

    atexit(deferred_argv_release);

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    /* We are called from the main thread here */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

    dpdk_vhost_class_init();

#ifdef DPDK_PDUMP
    VLOG_INFO("DPDK pdump packet capture enabled");
    err = rte_pdump_init(ovs_rundir());
    if (err) {
        VLOG_INFO("Error initialising DPDK pdump");
        rte_pdump_uninit();
    } else {
        char *server_socket_path;

        server_socket_path = xasprintf("%s/%s", ovs_rundir(),
                                       "pdump_server_socket");
        fatal_signal_add_file_to_unlink(server_socket_path);
        free(server_socket_path);
    }
#endif

    /* Finally, register the dpdk classes */
    netdev_dpdk_register();
}

void
dpdk_init(const struct smap *ovs_other_config)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovs_other_config && ovsthread_once_start(&once)) {
        dpdk_init__(ovs_other_config);
        ovsthread_once_done(&once);
    }
}

static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_ring_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        NULL,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_reconfigure,
        netdev_dpdk_vhost_rxq_recv);

static const struct netdev_class dpdk_vhost_client_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuserclient",
        netdev_dpdk_vhost_client_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_client_set_config,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_client_reconfigure,
        netdev_dpdk_vhost_rxq_recv);

void
netdev_dpdk_register(void)
{
    dpdk_common_init();
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_ring_class);
    netdev_register_provider(&dpdk_vhost_class);
    netdev_register_provider(&dpdk_vhost_client_class);
}

void
dpdk_set_lcore_id(unsigned cpu)
{
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;
}

bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}