/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "openvswitch/shash.h"
#include "unaligned.h"

#include "rte_config.h"
#include "rte_meter.h"
#include "rte_pdump.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/* We need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and a drop in
 * performance for standard Ethernet MTU. */
#define ETHER_HDR_MAX_LEN (ETHER_HDR_LEN + ETHER_CRC_LEN \
                           + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu) ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) - ETHER_HDR_LEN \
                                     - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu) (MTU_TO_MAX_FRAME_LEN(mtu) \
                        + sizeof(struct dp_packet) \
                        + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN 1024
#define NETDEV_DPDK_MAX_PKT_LEN 9728
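
/* Worked example: for the standard Ethernet MTU of 1500,
 * MTU_TO_FRAME_LEN(1500) == 1518 (MTU + 14-byte header + 4-byte CRC) and
 * MTU_TO_MAX_FRAME_LEN(1500) == 1526 (adding room for two 4-byte VLAN tags
 * for QinQ). */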
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);

/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
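
/* With the values above, MAX_NB_MBUF / MIN_NB_MBUF == 16, so the smallest
 * pool that will be attempted is (4096 * 64) / 16 == 16384 mbufs, a multiple
 * of RTE_MEMPOOL_CACHE_MAX_SIZE (512 in default DPDK builds). */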
/*
 * DPDK XSTATS Counter names definition.
 */
#define XSTAT_RX_64_PACKETS              "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS       "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS      "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS     "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS     "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS    "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS     "rx_size_1523_to_max_packets"

#define XSTAT_TX_64_PACKETS              "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS       "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS      "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS      "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS     "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS    "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS     "tx_size_1523_to_max_packets"

#define XSTAT_TX_MULTICAST_PACKETS       "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS       "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS       "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS       "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS         "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS       "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS           "rx_jabber_errors"
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue,
                                  * Max (n + 32 <= 4096). */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue,
                                  * Max (n + 32 <= 4096). */

#define OVS_VHOST_MAX_QUEUE_NUM 1024     /* Maximum number of vHost TX
                                          * queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and
                                          * not yet mapped to another queue. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets. */
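
/* Upper bound on retries of rte_vhost_enqueue_burst() when only part of a
 * batch could be enqueued; see the retry loop in
 * __netdev_dpdk_vhost_send(). */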
#define VHOST_ENQ_RETRY_NUM 8
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled. */
        .hw_ip_checksum = 0, /* IP checksum offload disabled. */
        .hw_vlan_filter = 0, /* VLAN filtering disabled. */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled. */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

static int rte_eal_init_ret = ENODEV;
/* Quality of Service */

/* An instance of a QoS configuration. Always associated with a particular
 * network device.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct the QoS implementation on 'netdev'. The
     * implementation should make the appropriate calls to configure QoS
     * according to 'details'. The implementation may assume that any current
     * QoS configuration already installed should be destroyed before
     * constructing the new configuration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets 'netdev->qos_conf'
     * to an initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_construct)(struct netdev *netdev, const struct smap *details);

    /* Destroys the data structures allocated by the implementation as part of
     * 'netdev->qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);

    /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)). */
    int (*qos_get)(const struct netdev *netdev, struct smap *details);

    /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
     * required calls to complete the reconfiguration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function may be null if 'qos_conf' is not configurable. */
    int (*qos_set)(struct netdev *netdev, const struct smap *details);

    /* Modifies an array of rte_mbufs. The modification is specific to
     * each QoS implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a QoS modification on the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
                   int pkt_cnt);
};
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};
static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;

/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
    = OVS_MUTEX_INITIALIZER;

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mp_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);

/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;  /* Protects the members and the NIC queue
                              * from concurrent access. It is used only
                              * if the queue is shared among different
                              * pmd threads (see 'concurrent_txq'). */
    int map;                 /* Mapping of configured vhost-user queue
                              * to the queue enabled by the guest. */
};
/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);

struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name. */
    int eth_port_id;           /* Ethernet device port id. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    rte_spinlock_t policer_lock;
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;
    /* Protects stats */
    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio identifier for vhost devices */
    ovsrcu_index vid;

    /* True if vHost device is 'up' and has been reconfigured at least once */
    bool vhost_reconfigured;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    /* In dpdk_list. */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    struct qos_conf *qos_conf;
    rte_spinlock_t qos_lock;

    /* The following properties cannot be changed when a device is running,
     * so we remember the request and update them next time
     * netdev_dpdk*_reconfigure() is called */
    int requested_mtu;
    int requested_n_txq;
    int requested_n_rxq;

    /* Socket ID detected when vHost device is brought up */
    int requested_socket_id;

    /* Denotes whether vHost port is client/server mode */
    uint64_t vhost_driver_flags;

    /* Ingress Policer */
    OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
    uint32_t policer_rate;
    uint32_t policer_burst;

    /* DPDK-ETH Flow control */
    struct rte_eth_fc_conf fc_conf;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
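
/* For example, with mtu 1500 and DPDK's default 128-byte
 * RTE_PKTMBUF_HEADROOM, dpdk_buf_size() yields
 * ROUND_UP(1526 + 128, 1024) == 2048 bytes. */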
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                    NETDEV_DPDK_MBUF_ALIGN);
}
/* XXX: use dpdk malloc for entire OVS. In fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex). */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
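
/* Per-mbuf constructor passed to rte_mempool_create() below: it chains to
 * DPDK's own rte_pktmbuf_init() and then initializes the 'struct dp_packet'
 * header that OVS embeds in each mbuf's private area. */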
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;
    struct rte_pktmbuf_pool_private mbp_priv;

    ovs_mutex_lock(&dpdk_mp_mutex);
    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            ovs_mutex_unlock(&dpdk_mp_mutex);
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);

    /* XXX: this is a really rough method of provisioning memory.
     * It's impossible to determine what the exact memory requirements are
     * when the number of ports and rxqs that utilize a particular mempool
     * can change dynamically at runtime. For the moment, use this rough
     * heuristic. */
    if (mtu >= ETHER_MTU) {
        mp_size = MAX_NB_MBUF;
    } else {
        mp_size = MIN_NB_MBUF;
    }

    if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                 dmp->mtu, dmp->socket_id, mp_size) < 0) {
        ovs_mutex_unlock(&dpdk_mp_mutex);
        rte_free(dmp);
        return NULL;
    }
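
    /* The allocation is retried with progressively smaller pools: 'mp_size'
     * is halved on each ENOMEM until rte_mempool_create() succeeds or the
     * size drops below MIN_NB_MBUF. */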
    do {
        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        ovs_mutex_unlock(&dpdk_mp_mutex);
        rte_free(dmp);
        return NULL;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
    ovs_mutex_unlock(&dpdk_mp_mutex);
    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    ovs_mutex_lock(&dpdk_mp_mutex);
    ovs_assert(dmp->refcount);

    if (!--dmp->refcount) {
        ovs_list_remove(&dmp->list_node);
        rte_mempool_free(dmp->mp);
        rte_free(dmp);
    }
    ovs_mutex_unlock(&dpdk_mp_mutex);
}
/* Tries to allocate a new mempool on 'requested_socket_id' with an mbuf size
 * corresponding to 'requested_mtu'.
 * On success the new configuration will be applied.
 * On error, the device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
    struct dpdk_mp *mp;

    mp = dpdk_mp_get(dev->requested_socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!mp) {
        VLOG_ERR("Insufficient memory to create memory pool for netdev "
                 "%s, with MTU %d on socket %d\n",
                 dev->up.name, dev->requested_mtu, dev->requested_socket_id);
        return ENOMEM;
    } else {
        dpdk_mp_put(dev->dpdk_mp);
        dev->dpdk_mp = mp;
        dev->mtu = dev->requested_mtu;
        dev->socket_id = dev->requested_socket_id;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    }

    return 0;
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;
    struct rte_eth_conf conf = port_conf;

    if (dev->mtu > ETHER_MTU) {
        conf.rxmode.jumbo_frame = 1;
        conf.rxmode.max_rx_pkt_len = dev->max_packet_len;
    } else {
        conf.rxmode.jumbo_frame = 0;
        conf.rxmode.max_rx_pkt_len = 0;
    }

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request fewer queues. */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
        if (diag) {
            VLOG_WARN("Interface %s eth_dev setup error %s\n",
                      dev->up.name, rte_strerror(-diag));
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues. */
            n_txq--;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues. */
            n_rxq--;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag ? -diag : EINVAL;
}
static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
    if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
        VLOG_WARN("Failed to enable flow control on device %d", dev->port_id);
    }
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    /* Get the Flow control configuration for DPDK-ETH */
    diag = rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
    if (diag) {
        VLOG_DBG("cannot get flow control parameters on port=%d, err=%d",
                 dev->port_id, diag);
    }

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}
static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    if (!rte_eal_init_ret) { /* Only after successful initialization */
        dev = dpdk_rte_mzalloc(sizeof *dev);
        if (dev) {
            return &dev->up;
        }
    }
    return NULL;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *dev, unsigned int n_txqs)
{
    unsigned i;

    dev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *dev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        /* Initialize map for vhost devices. */
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
        rte_spinlock_init(&dev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int sid;
    int err = 0;

    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;
    dev->requested_mtu = dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    ovsrcu_index_init(&dev->vid, -1);
    dev->vhost_reconfigured = false;

    err = netdev_dpdk_mempool_configure(dev);
    if (err) {
        goto unlock;
    }

    /* Initialise QoS configuration to NULL and qos lock to unlocked */
    dev->qos_conf = NULL;
    rte_spinlock_init(&dev->qos_lock);

    /* Initialise rcu pointer for ingress policer to NULL */
    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_rxq = NR_QUEUE;
    netdev->n_txq = NR_QUEUE;
    dev->requested_n_rxq = netdev->n_rxq;
    dev->requested_n_txq = netdev->n_txq;

    /* Initialize the flow control to NULL */
    memset(&dev->fc_conf, 0, sizeof dev->fc_conf);
    if (type == DPDK_DEV_ETH) {
        err = dpdk_eth_dev_init(dev);
        if (err) {
            goto unlock;
        }
        netdev_dpdk_alloc_txq(dev, netdev->n_txq);
    } else {
        netdev_dpdk_alloc_txq(dev, OVS_VHOST_MAX_QUEUE_NUM);
        /* Enable DPDK_DEV_VHOST device and set promiscuous mode flag. */
        dev->flags = NETDEV_UP | NETDEV_PROMISC;
    }

    ovs_list_push_back(&dpdk_list, &dev->list_node);

unlock:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
static int
netdev_dpdk_vhost_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(dev->vhost_id, sizeof dev->vhost_id, "%s/%s",
             vhost_sock_dir, name);

    dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;
    err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 dev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
    }
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_client_construct(struct netdev *netdev)
{
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_eth_dev_stop(dev->port_id);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
}
/* rte_vhost_driver_unregister() can call back destroy_device(), which will
 * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'. To avoid a
 * deadlock, none of the mutexes must be held while calling this function. */
static int
dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
                             char *vhost_id)
    OVS_EXCLUDED(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    return rte_vhost_driver_unregister(vhost_id);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    char *vhost_id;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_vid(dev) >= 0
        && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.", dev->vhost_id);
    }

    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    vhost_id = xstrdup(dev->vhost_id);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    if (dpdk_vhost_driver_unregister(dev, vhost_id)) {
        VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n",
                 netdev->name, vhost_id);
    } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        /* OVS server mode - remove this socket from list for deletion */
        fatal_signal_remove_file_to_unlink(vhost_id);
    }
    free(vhost_id);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "mtu", "%d", dev->mtu);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
*dev
, const struct smap
*args
)
1079 new_n_rxq
= MAX(smap_get_int(args
, "n_rxq", dev
->requested_n_rxq
), 1);
1080 if (new_n_rxq
!= dev
->requested_n_rxq
) {
1081 dev
->requested_n_rxq
= new_n_rxq
;
1082 netdev_request_reconfigure(&dev
->up
);
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    dpdk_set_rxq_config(dev, args);

    /* Flow control support is only available for DPDK Ethernet ports. */
    bool rx_fc_en = false;
    bool tx_fc_en = false;
    enum rte_eth_fc_mode fc_mode_set[2][2] =
        {{RTE_FC_NONE,     RTE_FC_TX_PAUSE},
         {RTE_FC_RX_PAUSE, RTE_FC_FULL}
        };
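
    /* The matrix is indexed as fc_mode_set[tx_fc_en][rx_fc_en]: enabling
     * both options selects RTE_FC_FULL, enabling neither selects
     * RTE_FC_NONE. */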
    rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
    tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
    dev->fc_conf.autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);

    dev->fc_conf.mode = fc_mode_set[tx_fc_en][rx_fc_en];

    dpdk_eth_flow_ctrl_setup(dev);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_set_rxq_config(dev, args);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_client_set_config(struct netdev *netdev,
                                    const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *path;

    ovs_mutex_lock(&dev->mutex);
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        path = smap_get(args, "vhost-server-path");
        if (path && strcmp(path, dev->vhost_id)) {
            strcpy(dev->vhost_id, path);
            netdev_request_reconfigure(netdev);
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}
/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
*
1175 netdev_dpdk_rxq_alloc(void)
1177 struct netdev_rxq_dpdk
*rx
= dpdk_rte_mzalloc(sizeof *rx
);
static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
static inline void
netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
                         struct rte_mbuf **pkts, int cnt)
{
    uint32_t nb_tx = 0;

    while (nb_tx != cnt) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, cnt - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != cnt)) {
        /* Free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < cnt; i++) {
            rte_pktmbuf_free(pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }
}
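
/* Policing uses DPDK's single-rate three-color marker (srTCM, RFC 2697) in
 * color-blind mode: a packet metered as green is kept, anything else is
 * dropped. */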
static inline bool
netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
                               struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}

static int
netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
                        struct rte_mbuf **pkts, int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet */
        if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}
*policer
, struct rte_mbuf
**pkts
,
1286 rte_spinlock_lock(&policer
->policer_lock
);
1287 cnt
= netdev_dpdk_policer_run(&policer
->in_policer
, pkts
, pkt_cnt
);
1288 rte_spinlock_unlock(&policer
->policer_lock
);
static bool
is_vhost_running(struct netdev_dpdk *dev)
{
    return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
}
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count,
                                     int dropped)
{
    int i;
    unsigned int packet_size;
    struct dp_packet *packet;

    stats->rx_packets += count;
    stats->rx_dropped += dropped;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet_batch *batch)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    int qid = rxq->queue_id;
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t nb_rx = 0;
    uint16_t dropped = 0;

    if (OVS_UNLIKELY(!is_vhost_running(dev)
                     || !(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(netdev_dpdk_get_vid(dev),
                                    qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) batch->packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
                                         nb_rx, dropped);
    rte_spinlock_unlock(&dev->stats_lock);

    batch->count = (int) nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    int nb_rx;
    int dropped = 0;

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) batch->packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    /* Update stats to reflect dropped packets */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    batch->count = nb_rx;

    return 0;
}
static inline int
netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                      int cnt)
{
    struct netdev *netdev = &dev->up;

    if (dev->qos_conf != NULL) {
        rte_spinlock_lock(&dev->qos_lock);
        if (dev->qos_conf != NULL) {
            cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
        }
        rte_spinlock_unlock(&dev->qos_lock);
    }

    return cnt;
}
static int
netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                              int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt;

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        if (OVS_UNLIKELY(pkt->pkt_len > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " max_packet_len %d",
                         dev->up.name, pkt->pkt_len, dev->max_packet_len);
            rte_pktmbuf_free(pkt);
            continue;
        }

        if (OVS_UNLIKELY(i != cnt)) {
            pkts[cnt] = pkt;
        }
        cnt++;
    }

    return cnt;
}
static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int dropped = 0;
    int i, retries = 0;

    qid = dev->tx_q[qid % netdev->n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(dev) || qid < 0
                     || !(dev->flags & NETDEV_UP))) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    /* Check whether QoS has been configured for the netdev. */
    cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);
    dropped = total_pkts - cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(netdev_dpdk_get_vid(dev),
                                          vhost_qid, cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible retry.*/
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            /* No packets sent - do not retry.*/
            break;
        }
    } while (cnt && (retries++ <= VHOST_ENQ_RETRY_NUM));

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
                                         cnt + dropped);
    rte_spinlock_unlock(&dev->stats_lock);

out:
    for (i = 0; i < total_pkts - dropped; i++) {
        dp_packet_delete(pkts[i]);
    }
}
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = batch->count;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    dp_packet_batch_apply_cutlen(batch);

    for (i = 0; i < batch->count; i++) {
        int size = dp_packet_size(batch->packets[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);
            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += batch->count - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(batch->packets[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
                                 newcnt);
    } else {
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);

        dropped += qos_pkts - newcnt;
        netdev_dpdk_eth_tx_burst(dev, qid, mbufs, newcnt);
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                       struct dp_packet_batch *batch,
                       bool may_steal, bool concurrent_txq OVS_UNUSED)
{

    if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        dp_packet_batch_apply_cutlen(batch);
        __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet_batch *batch, bool may_steal,
                   bool concurrent_txq)
{
    if (OVS_UNLIKELY(concurrent_txq)) {
        qid = qid % dev->up.n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     batch->packets[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        int dropped;
        int cnt = batch->count;
        struct rte_mbuf **cur_pkts = (struct rte_mbuf **) batch->packets;

        dp_packet_batch_apply_cutlen(batch);

        cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
        cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);
        dropped = batch->count - cnt;

        netdev_dpdk_eth_tx_burst(dev, qid, cur_pkts, cnt);

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(concurrent_txq)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet_batch *batch, bool may_steal,
                     bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    if (MTU_TO_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
        || mtu < ETHER_MIN_MTU) {
        VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu);
        return EINVAL;
    }

    ovs_mutex_lock(&dev->mutex);
    if (dev->requested_mtu != mtu) {
        dev->requested_mtu = mtu;
        netdev_request_reconfigure(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstat *xstats,
                           const struct rte_eth_xstat_name *names,
                           const unsigned int size)
{
    for (unsigned int i = 0; i < size; i++) {
        if (strcmp(XSTAT_RX_64_PACKETS, names[i].name) == 0) {
            stats->rx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->rx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->rx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->rx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->rx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->rx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->rx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_64_PACKETS, names[i].name) == 0) {
            stats->tx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->tx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->tx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->tx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->tx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->tx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->tx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, names[i].name) == 0) {
            stats->tx_multicast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->rx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->tx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, names[i].name) == 0) {
            stats->rx_undersized_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, names[i].name) == 0) {
            stats->rx_fragmented_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, names[i].name) == 0) {
            stats->rx_jabber_errors = xstats[i].value;
        }
    }
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstat *rte_xstats = NULL;
    struct rte_eth_xstat_name *rte_xstats_names = NULL;
    int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    /* Get length of statistics */
    rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
    if (rte_xstats_len < 0) {
        VLOG_WARN("Cannot get XSTATS values for port: %i", dev->port_id);
        goto out;
    }

    /* Reserve memory for xstats names and values */
    rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
    rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);

    /* Retrieve xstats names */
    rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
                                                  rte_xstats_names,
                                                  rte_xstats_len);
    if (rte_xstats_new_len != rte_xstats_len) {
        VLOG_WARN("Cannot get XSTATS names for port: %i.", dev->port_id);
        goto out;
    }

    /* Retrieve xstats values */
    memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
    rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                        rte_xstats_len);
    if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
        netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
                                   rte_xstats_ret);
    } else {
        VLOG_WARN("Cannot get XSTATS values for port: %i.", dev->port_id);
    }

out:
    free(rte_xstats);
    free(rte_xstats_names);

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
    }

    return 0;
}
static struct ingress_policer *
netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
{
    struct ingress_policer *policer = NULL;
    uint64_t rate_bytes;
    uint64_t burst_bytes;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    rte_spinlock_init(&policer->policer_lock);

    /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
    rate_bytes = rate * 1000 / 8;
    burst_bytes = burst * 1000 / 8;

    policer->app_srtcm_params.cir = rate_bytes;
    policer->app_srtcm_params.cbs = burst_bytes;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->in_policer,
                                 &policer->app_srtcm_params);
    if (err) {
        VLOG_ERR("Could not create rte meter for ingress policer");
        free(policer);
        return NULL;
    }

    return policer;
}
static int
netdev_dpdk_set_policing(struct netdev *netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;

    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value. */
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
                     : policer_burst);

    ovs_mutex_lock(&dev->mutex);

    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);

    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    }

    /* Destroy any existing ingress policer for the device if one exists */
    if (policer) {
        ovsrcu_postpone(free, policer);
    }

    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    } else {
        policer = NULL;
    }
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    } else {
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
         * update. */

        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(dev)) {
            netdev_change_seq_changed(&dev->up);

            /* Clear statistics if device is getting up. */
            if (NETDEV_UP & on) {
                rte_spinlock_lock(&dev->stats_lock);
                memset(&dev->stats, 0, sizeof (dev->stats));
                rte_spinlock_unlock(&dev->stats_lock);
            }
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}

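/* The smap built above surfaces in the database status column; for example
 * (fields and values vary by NIC and driver):
 *
 *   $ ovs-vsctl get Interface dpdk0 status
 *   {driver_name=..., max_rx_queues="128", numa_id="0", port_no="0", ...}
 */
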
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}

static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}

/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(int vid)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < rte_vhost_get_queue_num(vid); i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_TXQ, 0);
    }
}

/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->up.n_txq;

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    }

    rte_free(enabled_queues);
}

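/* Worked example: with total_txqs == 4 and only queues 0 and 2 enabled
 * (i.e. tx_q[i].map == i for i in {0, 2}), enabled_queues becomes {0, 2}
 * and the loop above rewrites the map to
 *
 *   0 --> 0, 1 --> 0, 2 --> 2, 3 --> 2
 *
 * so sends on the disabled qids 1 and 3 are spread round-robin over the
 * enabled queues. */
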
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int newnode = 0;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof(ifname));

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            uint32_t qp_num = rte_vhost_get_queue_num(vid);

            /* Get NUMA information. */
            newnode = rte_vhost_get_numa_node(vid);
            if (newnode == -1) {
#ifdef VHOST_NUMA
                VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
                          ifname);
#endif
                newnode = dev->socket_id;
            }

            if (dev->requested_n_txq != qp_num
                || dev->requested_n_rxq != qp_num
                || dev->requested_socket_id != newnode) {
                dev->requested_socket_id = newnode;
                dev->requested_n_rxq = qp_num;
                dev->requested_n_txq = qp_num;
                netdev_request_reconfigure(&dev->up);
            } else {
                /* Reconfiguration not required. */
                dev->vhost_reconfigured = true;
            }

            ovsrcu_index_set(&dev->vid, vid);
            exists = true;

            /* Disable notifications. */
            set_irq_status(vid);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' has been added on numa node %i",
              ifname, newnode);

    return 0;
}

/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->up.n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}

/*
 * Remove a virtio-net device from the specific vhost port.  Clearing the
 * device's 'vid' stops any more packets from being sent or received to/from
 * a VM, and the RCU synchronization below ensures all currently queued
 * packets have been sent/received before the device is removed.
 */
static void
destroy_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof(ifname));

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_vid(dev) == vid) {

            ovs_mutex_lock(&dev->mutex);
            dev->vhost_reconfigured = false;
            ovsrcu_index_set(&dev->vid, -1);
            netdev_dpdk_txq_map_clear(dev);

            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            exists = true;
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As the call to ovsrcu_synchronize() will end the quiescent state,
         * put the thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' has been removed", ifname);
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
    }
}

static int
vring_state_changed(int vid, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof(ifname));

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            if (enable) {
                dev->tx_q[qid].map = qid;
            } else {
                dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
            }
            netdev_dpdk_remap_txqs(dev);
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' "
                  "changed to \'%s\'", queue_id, qid, ifname,
                  (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
        return -1;
    }

    return 0;
}

int
netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
{
    return ovsrcu_index_get(&dev->vid);
}

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
}

/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};

static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the vhost thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}

static void
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
                              | 1ULL << VIRTIO_NET_F_HOST_TSO6
                              | 1ULL << VIRTIO_NET_F_CSUM);

    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
}

static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);
}

static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[RTE_RING_NAMESIZE];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);

    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}

static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err;

    /* Names always start with "dpdkr". */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            /* Really all that is needed. */
            *eth_port_id = ivshmem->eth_port_id;
            return 0;
        }
    }
    /* Need to create the device rings. */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}

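/* Naming example: a port called "dpdkr0" parses to user_port_id 0, and on
 * first open dpdk_ring_create() above builds the rings "dpdkr0_tx" and
 * "dpdkr0_rx" that the client application on the other end of the shared
 * memory attaches to. */
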
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
                      struct dp_packet_batch *batch, bool may_steal,
                      bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and return into the datapath
     * without recalculating the RSS hash. */
    for (i = 0; i < batch->count; i++) {
        dp_packet_rss_invalidate(batch->packets[i]);
    }

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}

static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}

/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
}

/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations' qos_name to name.  Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match is found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }
    return NULL;
}

/*
 * Call qos_destruct to clean up items associated with the netdev's
 * qos_conf.  Set the netdev's qos_conf to NULL.
 */
static void
qos_delete_conf(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_spinlock_lock(&dev->qos_lock);
    if (dev->qos_conf) {
        if (dev->qos_conf->ops->qos_destruct) {
            dev->qos_conf->ops->qos_destruct(netdev, dev->qos_conf);
        }
        dev->qos_conf = NULL;
    }
    rte_spinlock_unlock(&dev->qos_lock);
}

static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }
    return 0;
}

static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    if (dev->qos_conf) {
        *typep = dev->qos_conf->ops->qos_name;
        error = (dev->qos_conf->ops->qos_get
                 ? dev->qos_conf->ops->qos_get(netdev, details) : 0);
    } else {
        /* No QoS configuration set, return an empty string. */
        *typep = "";
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}

static int
netdev_dpdk_set_qos(struct netdev *netdev,
                    const char *type, const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    int error = 0;

    /* If type is empty or unsupported then the current QoS configuration
     * for the dpdk-netdev can be destroyed. */
    new_ops = qos_lookup_name(type);

    if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
        qos_delete_conf(netdev);
        return EOPNOTSUPP;
    }

    ovs_mutex_lock(&dev->mutex);

    if (dev->qos_conf) {
        if (new_ops == dev->qos_conf->ops) {
            error = new_ops->qos_set ? new_ops->qos_set(netdev, details) : 0;
        } else {
            /* Delete existing QoS configuration. */
            qos_delete_conf(netdev);
            ovs_assert(dev->qos_conf == NULL);

            /* Install new QoS configuration. */
            error = new_ops->qos_construct(netdev, details);
        }
    } else {
        error = new_ops->qos_construct(netdev, details);
    }

    ovs_assert((error == 0) == (dev->qos_conf != NULL));
    if (error) {
        VLOG_ERR("Failed to set QoS type %s on port %s, returned error: %s",
                 type, netdev->name, rte_strerror(-error));
    }

    ovs_mutex_unlock(&dev->mutex);
    return error;
}

/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};

static struct egress_policer *
egress_policer_get__(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    return CONTAINER_OF(dev->qos_conf, struct egress_policer, qos_conf);
}

static int
egress_policer_qos_construct(struct netdev *netdev,
                             const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct egress_policer *policer;
    int err = 0;

    rte_spinlock_lock(&dev->qos_lock);
    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    dev->qos_conf = &policer->qos_conf;
    policer->app_srtcm_params.cir = smap_get_ullong(details, "cir", 0);
    policer->app_srtcm_params.cbs = smap_get_ullong(details, "cbs", 0);
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);

    if (err < 0) {
        /* Error occurred during rte_meter creation; destroy the policer
         * and set the netdev's qos configuration to NULL. */
        free(policer);
        dev->qos_conf = NULL;
        err = -err;
    }
    rte_spinlock_unlock(&dev->qos_lock);

    return err;
}

static void
egress_policer_qos_destruct(struct netdev *netdev OVS_UNUSED,
                            struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}

static int
egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
{
    struct egress_policer *policer = egress_policer_get__(netdev);
    smap_add_format(details, "cir", "%llu",
                    1ULL * policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%llu",
                    1ULL * policer->app_srtcm_params.cbs);

    return 0;
}

static int
egress_policer_qos_set(struct netdev *netdev, const struct smap *details)
{
    struct egress_policer *policer;
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    policer = egress_policer_get__(netdev);
    rte_spinlock_lock(&dev->qos_lock);
    policer->app_srtcm_params.cir = smap_get_ullong(details, "cir", 0);
    policer->app_srtcm_params.cbs = smap_get_ullong(details, "cbs", 0);
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);

    if (err < 0) {
        /* Error occurred during rte_meter reconfiguration; destroy the
         * policer and set the netdev's qos configuration to NULL. */
        free(policer);
        dev->qos_conf = NULL;
        err = -err;
    }
    rte_spinlock_unlock(&dev->qos_lock);

    return err;
}

static int
egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts, int pkt_cnt)
{
    int cnt = 0;
    struct egress_policer *policer = egress_policer_get__(netdev);

    cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);

    return cnt;
}

static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_set,
    egress_policer_run
};

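/* Example (names and values are illustrative): an egress policer is attached
 * through the generic QoS tables, with "cir" in bytes/sec and "cbs" in
 * bytes, e.g.
 *
 *   ovs-vsctl set port vhost-user0 qos=@qos -- \
 *       --id=@qos create qos type=egress-policer \
 *       other-config:cir=46000000 other-config:cbs=2048
 */
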
static int
netdev_dpdk_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dev->mutex);

    if (netdev->n_txq == dev->requested_n_txq
        && netdev->n_rxq == dev->requested_n_rxq
        && dev->mtu == dev->requested_mtu) {
        /* Reconfiguration is unnecessary. */
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    if (dev->mtu != dev->requested_mtu) {
        netdev_dpdk_mempool_configure(dev);
    }

    netdev->n_txq = dev->requested_n_txq;
    netdev->n_rxq = dev->requested_n_rxq;

    rte_free(dev->tx_q);
    err = dpdk_eth_dev_init(dev);
    netdev_dpdk_alloc_txq(dev, netdev->n_txq);

    netdev_change_seq_changed(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}

static void
dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    dev->up.n_txq = dev->requested_n_txq;
    dev->up.n_rxq = dev->requested_n_rxq;

    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    netdev_dpdk_remap_txqs(dev);

    if (dev->requested_socket_id != dev->socket_id
        || dev->requested_mtu != dev->mtu) {
        if (!netdev_dpdk_mempool_configure(dev)) {
            netdev_change_seq_changed(&dev->up);
        }
    }

    if (netdev_dpdk_get_vid(dev) >= 0) {
        dev->vhost_reconfigured = true;
    }
}

static int
netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_vhost_reconfigure_helper(dev);
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}

static int
netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;

    ovs_mutex_lock(&dev->mutex);

    dpdk_vhost_reconfigure_helper(dev);

    /* Configure vHost client mode if requested and if the following criteria
     * are met:
     *  1. Device hasn't been registered yet.
     *  2. A path has been specified.
     */
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)
            && strlen(dev->vhost_id)) {
        /* Register client-mode device. */
        err = rte_vhost_driver_register(dev->vhost_id,
                                        RTE_VHOST_USER_CLIENT);
        if (err) {
            VLOG_ERR("vhost-user device setup failure for device %s",
                     dev->vhost_id);
        } else {
            /* Configuration successful. */
            dev->vhost_driver_flags |= RTE_VHOST_USER_CLIENT;
            VLOG_INFO("vHost User device '%s' created in 'client' mode, "
                      "using client socket '%s'",
                      dev->up.name, dev->vhost_id);
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

#define NETDEV_DPDK_CLASS(NAME, CONSTRUCT, DESTRUCT,          \
                          SET_CONFIG, SET_TX_MULTIQ, SEND,    \
                          GET_CARRIER, GET_STATS,             \
                          GET_FEATURES, GET_STATUS,           \
                          RECONFIGURE, RXQ_RECV)              \
{                                                             \
    NAME,                                                     \
    true,                       /* is_pmd */                  \
    NULL,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    SET_CONFIG,                                               \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    SET_TX_MULTIQ,                                            \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    netdev_dpdk_set_policing,                                 \
    netdev_dpdk_get_qos_types,                                \
    NULL,                       /* get_qos_capabilities */    \
    netdev_dpdk_get_qos,                                      \
    netdev_dpdk_set_qos,                                      \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_addr_list */           \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
    RECONFIGURE,                                              \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}

static bool
process_vhost_flags(char *flag, char *default_val, int size,
                    const struct smap *ovs_other_config,
                    char **new_val)
{
    const char *val;
    int changed = 0;

    val = smap_get(ovs_other_config, flag);

    /* Process the vhost-sock-dir flag if it is provided, otherwise resort
     * to the default value. */
    if (val && (strlen(val) <= size)) {
        changed = 1;
        *new_val = xstrdup(val);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}

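/* Example: a relative socket directory supplied through the database, e.g.
 *
 *   ovs-vsctl --no-wait set Open_vSwitch . other_config:vhost-sock-dir=sockets
 *
 * is picked up by this helper and expanded by dpdk_init__() below to
 * "<ovs_rundir>/sockets". */
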
static char **
grow_argv(char ***argv, size_t cur_siz, size_t grow_by)
{
    return xrealloc(*argv, sizeof(char *) * (cur_siz + grow_by));
}

static void
dpdk_option_extend(char ***argv, int argc, const char *option,
                   const char *value)
{
    char **newargv = grow_argv(argv, argc, 2);
    *argv = newargv;
    newargv[argc] = xstrdup(option);
    newargv[argc+1] = xstrdup(value);
}

static char **
move_argv(char ***argv, size_t cur_size, char **src_argv, size_t src_argc)
{
    char **newargv = grow_argv(argv, cur_size, src_argc);
    while (src_argc--) {
        newargv[cur_size+src_argc] = src_argv[src_argc];
        src_argv[src_argc] = NULL;
    }
    return newargv;
}

static int
extra_dpdk_args(const char *ovs_extra_config, char ***argv, int argc)
{
    int ret = argc;
    char *release_tok = xstrdup(ovs_extra_config);
    char *tok, *endptr = NULL;

    for (tok = strtok_r(release_tok, " ", &endptr); tok != NULL;
         tok = strtok_r(NULL, " ", &endptr)) {
        char **newarg = grow_argv(argv, ret, 1);
        *argv = newarg;
        newarg[ret++] = xstrdup(tok);
    }

    free(release_tok);
    return ret;
}

static bool
argv_contains(char **argv_haystack, const size_t argc_haystack,
              const char *needle)
{
    for (size_t i = 0; i < argc_haystack; ++i) {
        if (!strcmp(argv_haystack[i], needle)) {
            return true;
        }
    }
    return false;
}

static int
construct_dpdk_options(const struct smap *ovs_other_config,
                       char ***argv, const int initial_size,
                       char **extra_args, const size_t extra_argc)
{
    struct dpdk_options_map {
        const char *ovs_configuration;
        const char *dpdk_option;
        bool default_enabled;
        const char *default_value;
    } opts[] = {
        {"dpdk-lcore-mask", "-c", false, NULL},
        {"dpdk-hugepage-dir", "--huge-dir", false, NULL},
    };

    int i, ret = initial_size;

    /* First, construct from the flat options (non-mutually-exclusive). */
    for (i = 0; i < ARRAY_SIZE(opts); ++i) {
        const char *lookup = smap_get(ovs_other_config,
                                      opts[i].ovs_configuration);
        if (!lookup && opts[i].default_enabled) {
            lookup = opts[i].default_value;
        }

        if (lookup) {
            if (!argv_contains(extra_args, extra_argc, opts[i].dpdk_option)) {
                dpdk_option_extend(argv, ret, opts[i].dpdk_option, lookup);
                ret += 2;
            } else {
                VLOG_WARN("Ignoring database defined option '%s' due to "
                          "dpdk_extras config", opts[i].dpdk_option);
            }
        }
    }

    return ret;
}

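/* Example: other_config:dpdk-lcore-mask=0x2 and
 * other_config:dpdk-hugepage-dir=/dev/hugepages in the database add
 * "-c 0x2 --huge-dir /dev/hugepages" to the EAL argv, unless the same EAL
 * option already appears in other_config:dpdk-extra, in which case the
 * database value is ignored with a warning. */
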
#define MAX_DPDK_EXCL_OPTS 10

static int
construct_dpdk_mutex_options(const struct smap *ovs_other_config,
                             char ***argv, const int initial_size,
                             char **extra_args, const size_t extra_argc)
{
    struct dpdk_exclusive_options_map {
        const char *category;
        const char *ovs_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *eal_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *default_value;
        int default_option;
    } excl_opts[] = {
        {"memory type",
         {"dpdk-alloc-mem", "dpdk-socket-mem", NULL,},
         {"-m",             "--socket-mem",    NULL,},
         "1024,0", 1
        },
    };

    int i, ret = initial_size;
    for (i = 0; i < ARRAY_SIZE(excl_opts); ++i) {
        int found_opts = 0, scan, found_pos = -1;
        const char *found_value;
        struct dpdk_exclusive_options_map *popt = &excl_opts[i];

        for (scan = 0; scan < MAX_DPDK_EXCL_OPTS
                 && popt->ovs_dpdk_options[scan]; ++scan) {
            const char *lookup = smap_get(ovs_other_config,
                                          popt->ovs_dpdk_options[scan]);
            if (lookup && strlen(lookup)) {
                found_opts++;
                found_pos = scan;
                found_value = lookup;
            }
        }

        if (!found_opts) {
            if (popt->default_option) {
                found_pos = popt->default_option;
                found_value = popt->default_value;
            } else {
                continue;
            }
        }

        if (found_opts > 1) {
            VLOG_ERR("Multiple defined options for %s. Please check your"
                     " database settings and reconfigure if necessary.",
                     popt->category);
        }

        if (!argv_contains(extra_args, extra_argc,
                           popt->eal_dpdk_options[found_pos])) {
            dpdk_option_extend(argv, ret, popt->eal_dpdk_options[found_pos],
                               found_value);
            ret += 2;
        } else {
            VLOG_WARN("Ignoring database defined option '%s' due to "
                      "dpdk_extras config", popt->eal_dpdk_options[found_pos]);
        }
    }

    return ret;
}

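/* Example: other_config:dpdk-socket-mem=1024,0 becomes "--socket-mem 1024,0";
 * when neither dpdk-alloc-mem nor dpdk-socket-mem is set, the default above
 * also yields "--socket-mem 1024,0".  Setting both logs the "multiple defined
 * options" error and the last option scanned wins. */
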
static int
get_dpdk_args(const struct smap *ovs_other_config, char ***argv,
              int argc)
{
    const char *extra_configuration;
    char **extra_args = NULL;
    int i;
    size_t extra_argc = 0;

    extra_configuration = smap_get(ovs_other_config, "dpdk-extra");
    if (extra_configuration) {
        extra_argc = extra_dpdk_args(extra_configuration, &extra_args, 0);
    }

    i = construct_dpdk_options(ovs_other_config, argv, argc, extra_args,
                               extra_argc);
    i = construct_dpdk_mutex_options(ovs_other_config, argv, i, extra_args,
                                     extra_argc);

    if (extra_configuration) {
        *argv = move_argv(argv, i, extra_args, extra_argc);
    }

    return i + extra_argc;
}

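/* Taken together, a database carrying dpdk-lcore-mask=0x2 and
 * dpdk-socket-mem=1024,0 produces an EAL argv equivalent to
 *
 *   ovs-vswitchd -c 0x2 --socket-mem 1024,0
 *
 * with any dpdk-extra tokens appended verbatim. */
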
static char **dpdk_argv;
static int dpdk_argc;

static void
deferred_argv_release(void)
{
    int result;
    for (result = 0; result < dpdk_argc; ++result) {
        free(dpdk_argv[result]);
    }

    free(dpdk_argv);
}

static void
dpdk_init__(const struct smap *ovs_other_config)
{
    char **argv = NULL;
    int result;
    int argc, argc_tmp;
    bool auto_determine = true;
    int err = 0;
    cpu_set_t cpuset;
    char *sock_dir_subcomponent;

    if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
        VLOG_INFO("DPDK Disabled - to change this requires a restart.\n");
        return;
    }

    VLOG_INFO("DPDK Enabled, initializing");
    if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
                            NAME_MAX, ovs_other_config,
                            &sock_dir_subcomponent)) {
        struct stat s;
        if (!strstr(sock_dir_subcomponent, "..")) {
            vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
                                       sock_dir_subcomponent);

            err = stat(vhost_sock_dir, &s);
            if (err) {
                VLOG_ERR("vhost-user sock directory '%s' does not exist.",
                         vhost_sock_dir);
            }
        } else {
            vhost_sock_dir = xstrdup(ovs_rundir());
            VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid "
                     "characters '..' - using %s instead.",
                     ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
        }
        free(sock_dir_subcomponent);
    } else {
        vhost_sock_dir = sock_dir_subcomponent;
    }

    argv = grow_argv(&argv, 0, 1);
    argc = 1;
    argv[0] = xstrdup(ovs_get_program_name());
    argc_tmp = get_dpdk_args(ovs_other_config, &argv, argc);

    while (argc_tmp != argc) {
        if (!strcmp("-c", argv[argc]) || !strcmp("-l", argv[argc])) {
            auto_determine = false;
        }
        argc++;
    }
    argc = argc_tmp;

    /**
     * NOTE: This is an unsophisticated mechanism for determining the DPDK
     * lcore for the DPDK Master.
     */
    if (auto_determine) {
        int i;
        /* Get the main thread affinity. */
        CPU_ZERO(&cpuset);
        err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (!err) {
            for (i = 0; i < CPU_SETSIZE; i++) {
                if (CPU_ISSET(i, &cpuset)) {
                    argv = grow_argv(&argv, argc, 2);
                    argv[argc++] = xstrdup("-c");
                    argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
                    i = CPU_SETSIZE;
                }
            }
        } else {
            VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
            /* User did not set dpdk-lcore-mask and we are unable to get the
             * current thread affinity - default to core 0x1. */
            argv = grow_argv(&argv, argc, 2);
            argv[argc++] = xstrdup("-c");
            argv[argc++] = xasprintf("0x%X", 1);
        }
    }

    argv = grow_argv(&argv, argc, 1);
    argv[argc] = NULL;

    optind = 1;

    if (VLOG_IS_INFO_ENABLED()) {
        struct ds eal_args;
        int opt;
        ds_init(&eal_args);
        ds_put_cstr(&eal_args, "EAL ARGS:");
        for (opt = 0; opt < argc; ++opt) {
            ds_put_cstr(&eal_args, " ");
            ds_put_cstr(&eal_args, argv[opt]);
        }
        VLOG_INFO("%s", ds_cstr_ro(&eal_args));
        ds_destroy(&eal_args);
    }

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    /* Set the main thread affinity back to the pre rte_eal_init() value. */
    if (auto_determine && !err) {
        err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (err) {
            VLOG_ERR("Thread setaffinity error %d", err);
        }
    }

    dpdk_argv = argv;
    dpdk_argc = argc;

    atexit(deferred_argv_release);

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    /* We are called from the main thread here. */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

    dpdk_vhost_class_init();

#ifdef DPDK_PDUMP
    VLOG_INFO("DPDK pdump packet capture enabled");
    err = rte_pdump_init(ovs_rundir());
    if (err) {
        VLOG_INFO("Error initialising DPDK pdump");
        rte_pdump_uninit();
    } else {
        char *server_socket_path;

        server_socket_path = xasprintf("%s/%s", ovs_rundir(),
                                       "pdump_server_socket");
        fatal_signal_add_file_to_unlink(server_socket_path);
        free(server_socket_path);
    }
#endif

    /* Finally, register the dpdk classes. */
    netdev_dpdk_register();
}

void
dpdk_init(const struct smap *ovs_other_config)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovs_other_config && ovsthread_once_start(&once)) {
        dpdk_init__(ovs_other_config);
        ovsthread_once_done(&once);
    }
}

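/* DPDK support is off by default; initialization is requested at runtime
 * with
 *
 *   ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-init=true
 *
 * The once-guard above means only the first such call runs dpdk_init__();
 * changing the configuration afterwards requires a restart, as logged
 * there. */
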
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_ring_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);

static const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        NULL,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_reconfigure,
        netdev_dpdk_vhost_rxq_recv);

static const struct netdev_class dpdk_vhost_client_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuserclient",
        netdev_dpdk_vhost_client_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_client_set_config,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_client_reconfigure,
        netdev_dpdk_vhost_rxq_recv);

void
netdev_dpdk_register(void)
{
    dpdk_common_init();
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_ring_class);
    netdev_register_provider(&dpdk_vhost_class);
    netdev_register_provider(&dpdk_vhost_client_class);
}

void
dpdk_set_lcore_id(unsigned cpu)
{
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;
}

bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}