/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sys/types.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "unaligned.h"

#include "rte_config.h"
#include "rte_meter.h"
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);

#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (ETHER_HDR_LEN + ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len) - ETHER_HDR_LEN \
                                     - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              (MTU_TO_MAX_FRAME_LEN(mtu)    \
                                     + sizeof(struct dp_packet)  \
                                     + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN      1024
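/* Illustrative example (not in the original source): with a standard
 * 1500-byte MTU, ETHER_HDR_MAX_LEN is 14 + 4 + 2*4 = 26 bytes, so
 * MTU_TO_FRAME_LEN(1500) is 1518 and MTU_TO_MAX_FRAME_LEN(1500) is 1526.
 * MBUF_SIZE(1500) additionally reserves sizeof(struct dp_packet) and
 * RTE_PKTMBUF_HEADROOM (128 bytes in a default DPDK build), both of which
 * depend on the build configuration. */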
/* Max and min number of packets in the mempool.  OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF */
#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE
/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
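/* Illustrative example (not in the original source): MAX_NB_MBUF is
 * 4096 * 64 = 262144 and MIN_NB_MBUF is 4096 * 4 = 16384, so the mempool
 * allocation attempts run 262144, 131072, 65536, 32768, 16384 before
 * giving up. */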
/*
 * DPDK XSTATS Counter names definition
 */
#define XSTAT_RX_64_PACKETS              "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS       "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS      "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS      "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS     "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS    "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS     "rx_size_1523_to_max_packets"

#define XSTAT_TX_64_PACKETS              "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS       "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS      "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS      "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS     "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS    "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS     "tx_size_1523_to_max_packets"

#define XSTAT_TX_MULTICAST_PACKETS       "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS       "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS       "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS       "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS         "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS       "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS           "rx_jabber_errors"
#define NIC_PORT_RX_Q_SIZE 2048  /* Size of Physical NIC RX Queue, Max (n+32<=4096) */
#define NIC_PORT_TX_Q_SIZE 2048  /* Size of Physical NIC TX Queue, Max (n+32<=4096) */

#define OVS_VHOST_MAX_QUEUE_NUM 1024     /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */
static char *cuse_dev_name = NULL;    /* Character device cuse_dev_name. */
static char *vhost_sock_dir = NULL;   /* Location of vhost-user sockets */

#define VHOST_ENQ_RETRY_NUM 8
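/* Note (added): VHOST_ENQ_RETRY_NUM bounds the retry loop in
 * __netdev_dpdk_vhost_send(); after 8 partial calls to
 * rte_vhost_enqueue_burst() the remaining packets are dropped and
 * accounted in 'tx_dropped'. */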
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
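/* Note (added): DRAIN_TSC is expressed in TSC cycles, so the actual drain
 * interval depends on the core's clock; 200000 cycles is roughly 100 us on
 * a 2 GHz core.  See dpdk_queue_pkts() for how it is applied. */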
static int rte_eal_init_ret = ENODEV;

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Quality of Service */

/* An instance of a QoS configuration.  Always associated with a particular
 * netdev.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;
    /* Called to construct the QoS implementation on 'netdev'. The
     * implementation should make the appropriate calls to configure QoS
     * according to 'details'. The implementation may assume that any current
     * QoS configuration already installed should be destroyed before
     * constructing the new configuration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets 'netdev->qos_conf'
     * to an initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_construct)(struct netdev *netdev, const struct smap *details);
    /* Destroys the data structures allocated by the implementation as part of
     * 'qos_conf'.
     *
     * For all QoS implementations it should always be non-null.
     */
    void (*qos_destruct)(struct netdev *netdev, struct qos_conf *conf);
    /* Retrieves details of 'netdev->qos_conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)). */
    int (*qos_get)(const struct netdev *netdev, struct smap *details);
    /* Reconfigures 'netdev->qos_conf' according to 'details', performing any
     * required calls to complete the reconfiguration.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function may be null if 'qos_conf' is not configurable.
     */
    int (*qos_set)(struct netdev *netdev, const struct smap *details);
    /* Modify an array of rte_mbufs. The modification is specific to
     * each qos implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a qos modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null.
     */
    int (*qos_run)(struct netdev *netdev, struct rte_mbuf **pkts,
                   int pkt_cnt);
};
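/* A minimal, hypothetical implementation of this interface (illustrative
 * only; the 'my_*' names are not part of this file) would look like:
 *
 *     static const struct dpdk_qos_ops my_qos_ops = {
 *         .qos_name = "my-qos",
 *         .qos_construct = my_qos_construct,
 *         .qos_destruct = my_qos_destruct,
 *         .qos_get = my_qos_get,
 *         .qos_set = my_qos_set,
 *         .qos_run = my_qos_run,
 *     };
 */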
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointers to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);
/* This mutex must be used by non pmd threads when allocating or freeing
 * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
 * use mempools, a non pmd thread should hold this mutex while calling them */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;

struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    bool flush_tx;                 /* Set to true to flush queue every time */
                                   /* pkts are queued. */
    int count;
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'txq_needs_locking'). */
    int map;                       /* Mapping of configured vhost-user queues
                                    * to queues enabled by the guest. */
    uint64_t tsc;
    struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* dpdk has no way to remove dpdk ring ethernet devices
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;           /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    rte_spinlock_t policer_lock;
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;

    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* The user might request more txqs than the NIC has.  We remap those
     * ('up.n_txq') on these ('real_n_txq').
     * If the numbers match, 'txq_needs_locking' is false, otherwise it is
     * true and we will take a spinlock on transmission */
    int real_n_txq;
    int real_n_rxq;
    bool txq_needs_locking;

    /* virtio-net structure for vhost device */
    OVSRCU_TYPE(struct virtio_net *) virtio_dev;

    /* Identifier used to distinguish vhost devices from each other */
    char vhost_id[PATH_MAX];

    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    struct qos_conf *qos_conf;
    rte_spinlock_t qos_lock;

    /* The following properties cannot be changed when a device is running,
     * so we remember the request and update them next time
     * netdev_dpdk*_reconfigure() is called */
    int requested_n_rxq;
    int requested_n_txq;

    /* Ingress Policer */
    OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
    uint32_t policer_rate;
    uint32_t policer_burst;
};
struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static bool dpdk_thread_is_pmd(void);

static int netdev_dpdk_construct(struct netdev *);

struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);
static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->construct == netdev_dpdk_construct;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                    NETDEV_DPDK_MBUF_ALIGN);
}
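/* Illustrative example (not in the original source): for an MTU of 1500,
 * MTU_TO_MAX_FRAME_LEN(1500) is 1526; adding the default 128-byte
 * RTE_PKTMBUF_HEADROOM gives 1654, which ROUND_UP() turns into 2048 with
 * NETDEV_DPDK_MBUF_ALIGN of 1024. */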
/* XXX: use dpdk malloc for entire OVS. in fact huge page should be used
 * for all other segments data, bss and text. */

static void *
dpdk_rte_mzalloc(size_t sz)
{
    void *ptr;

    ptr = rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
    if (ptr == NULL) {
        out_of_memory();
    }
    return ptr;
}
/* XXX this function should be called only by pmd threads (or by non pmd
 * threads holding the nonpmd_mempool_mutex) */
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_m,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *m = _m;

    rte_pktmbuf_init(mp, opaque_arg, _m, i);

    dp_packet_init_dpdk((struct dp_packet *) m, m->buf_len);
}
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu) OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_mp *dmp = NULL;
    char mp_name[RTE_MEMPOOL_NAMESIZE];
    unsigned mp_size;
    struct rte_pktmbuf_pool_private mbp_priv;

    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            return dmp;
        }
    }

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);
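    /* Note (added): 'struct dp_packet' embeds the rte_mbuf as its first
     * member, so the mempool's per-mbuf private area needs to hold only the
     * remainder of the dp_packet, while MBUF_SIZE() reserves space for the
     * whole structure (see ovs_rte_pktmbuf_init()). */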
    mp_size = MAX_NB_MBUF;
    do {
        if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
                     dmp->mtu, dmp->socket_id, mp_size) < 0) {
            return NULL;
        }

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
    } while (!dmp->mp && rte_errno == ENOMEM
             && (mp_size /= 2) >= MIN_NB_MBUF);

    if (dmp->mp == NULL) {
        return NULL;
    }

    VLOG_DBG("Allocated \"%s\" mempool with %u mbufs", mp_name, mp_size);

    ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);
    return dmp;
}
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    dmp->refcount--;
    ovs_assert(dmp->refcount >= 0);

#if 0
    /* I could not find any API to destroy mp. */
    if (dmp->refcount == 0) {
        list_delete(dmp->list_node);
        /* destroy mp-pool. */
    }
#endif
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            check_link_status(dev);
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
{
    int diag = 0;
    int i;

    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available. When this happens we can retry the configuration
     * and request less queues */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
        if (diag) {
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with less tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with less rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->real_n_txq = n_txq;

        return 0;
    }

    return diag;
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);
    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}

static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    if (!rte_eal_init_ret) { /* Only after successful initialization */
        dev = dpdk_rte_mzalloc(sizeof *dev);
        return &dev->up;
    }

    return NULL;
}
static void
netdev_dpdk_alloc_txq(struct netdev_dpdk *dev, unsigned int n_txqs)
{
    unsigned i;

    dev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *dev->tx_q);
    for (i = 0; i < n_txqs; i++) {
        int numa_id = ovs_numa_get_numa_id(i);

        if (!dev->txq_needs_locking) {
            /* Each index is considered as a cpu core id, since there should
             * be one tx queue for each cpu core. If the corresponding core
             * is not on the same numa node as 'dev', flags the
             * 'flush_tx'. */
            dev->tx_q[i].flush_tx = dev->socket_id == numa_id;
        } else {
            /* Queues are shared among CPUs. Always flush */
            dev->tx_q[i].flush_tx = true;
        }

        /* Initialize map for vhost devices. */
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
        rte_spinlock_init(&dev->tx_q[i].tx_lock);
    }
}
static int
netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int sid;
    int err = 0;
    uint32_t buf_size;

    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info. In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->port_id = port_no;
    dev->type = type;
    dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    buf_size = dpdk_buf_size(dev->mtu);
    dev->dpdk_mp = dpdk_mp_get(dev->socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!dev->dpdk_mp) {
        err = ENOMEM;
        goto unlock;
    }

    /* Initialise QoS configuration to NULL and qos lock to unlocked */
    dev->qos_conf = NULL;
    rte_spinlock_init(&dev->qos_lock);

    /* Initialise rcu pointer for ingress policer to NULL */
    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_txq = NR_QUEUE;
    netdev->n_rxq = NR_QUEUE;
    dev->requested_n_rxq = NR_QUEUE;
    dev->requested_n_txq = NR_QUEUE;
    dev->real_n_txq = NR_QUEUE;

    if (type == DPDK_DEV_ETH) {
        netdev_dpdk_alloc_txq(dev, NR_QUEUE);
        err = dpdk_eth_dev_init(dev);
        if (err) {
            goto unlock;
        }
    } else {
        netdev_dpdk_alloc_txq(dev, OVS_VHOST_MAX_QUEUE_NUM);
        /* Enable DPDK_DEV_VHOST device and set promiscuous mode flag. */
        dev->flags = NETDEV_UP | NETDEV_PROMISC;
    }

    ovs_list_push_back(&dpdk_list, &dev->list_node);

unlock:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
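/* Example (added): with prefix "dpdk", the name "dpdk0" parses to port_no 0
 * and "dpdk12" to port_no 12, while "dpdk-1" and "eth0" are rejected. */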
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
*netdev
) OVS_REQUIRES(dpdk_mutex
)
835 if (rte_eal_init_ret
) {
836 return rte_eal_init_ret
;
839 return netdev_dpdk_init(netdev
, -1, DPDK_DEV_VHOST
);
843 netdev_dpdk_vhost_cuse_construct(struct netdev
*netdev
)
845 struct netdev_dpdk
*dev
= netdev_dpdk_cast(netdev
);
848 if (rte_eal_init_ret
) {
849 return rte_eal_init_ret
;
852 ovs_mutex_lock(&dpdk_mutex
);
853 strncpy(dev
->vhost_id
, netdev
->name
, sizeof(dev
->vhost_id
));
854 err
= vhost_construct_helper(netdev
);
855 ovs_mutex_unlock(&dpdk_mutex
);
860 netdev_dpdk_vhost_user_construct(struct netdev
*netdev
)
862 struct netdev_dpdk
*dev
= netdev_dpdk_cast(netdev
);
863 const char *name
= netdev
->name
;
866 /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
867 * the file system. '/' or '\' would traverse directories, so they're not
868 * acceptable in 'name'. */
869 if (strchr(name
, '/') || strchr(name
, '\\')) {
870 VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
871 "A valid name must not include '/' or '\\'",
876 if (rte_eal_init_ret
) {
877 return rte_eal_init_ret
;
880 ovs_mutex_lock(&dpdk_mutex
);
881 /* Take the name of the vhost-user port and append it to the location where
882 * the socket is to be created, then register the socket.
884 snprintf(dev
->vhost_id
, sizeof(dev
->vhost_id
), "%s/%s",
885 vhost_sock_dir
, name
);
887 err
= rte_vhost_driver_register(dev
->vhost_id
);
889 VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
892 fatal_signal_add_file_to_unlink(dev
->vhost_id
);
893 VLOG_INFO("Socket %s created for vhost-user port %s\n",
894 dev
->vhost_id
, name
);
895 err
= vhost_construct_helper(netdev
);
898 ovs_mutex_unlock(&dpdk_mutex
);
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_stop(dev->port_id);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_virtio(dev) != NULL) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.",
                 dev->vhost_id);
    }

    if (rte_vhost_driver_unregister(dev->vhost_id)) {
        VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
    } else {
        fatal_signal_remove_file_to_unlink(dev->vhost_id);
    }

    ovs_mutex_lock(&dev->mutex);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));
    ovs_mutex_unlock(&dev->mutex);

    ovs_mutex_lock(&dpdk_mutex);
    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);
    ovs_mutex_unlock(&dpdk_mutex);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int new_n_rxq;

    ovs_mutex_lock(&dev->mutex);
    new_n_rxq = MAX(smap_get_int(args, "n_rxq", dev->requested_n_rxq), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}
/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    return &rx->up;
}

static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}

static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
static inline void
dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint32_t nb_tx = 0;

    while (nb_tx != txq->count) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
                               txq->count - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != txq->count)) {
        /* free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < txq->count; i++) {
            rte_pktmbuf_free(txq->burst_pkts[i]);
        }
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += txq->count - nb_tx;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    txq->count = 0;
    txq->tsc = rte_get_timer_cycles();
}

static inline void
dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];

    if (txq->count == 0) {
        return;
    }
    dpdk_queue_flush__(dev, qid);
}
static inline bool
netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
                               struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}
static int
netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
                        struct rte_mbuf **pkts, int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet */
        if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}
static int
ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
                    int pkt_cnt)
{
    int cnt = 0;

    rte_spinlock_lock(&policer->policer_lock);
    cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts, pkt_cnt);
    rte_spinlock_unlock(&policer->policer_lock);

    return cnt;
}
static bool
is_vhost_running(struct virtio_net *virtio_dev)
{
    return (virtio_dev != NULL && (virtio_dev->flags & VIRTIO_DEV_RUNNING));
}
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count,
                                     int dropped)
{
    int i;
    unsigned int packet_size;
    struct dp_packet *packet;

    stats->rx_packets += count;
    stats->rx_dropped += dropped;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet **packets, int *c)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
    int qid = rxq->queue_id;
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t nb_rx = 0;
    uint16_t dropped = 0;

    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev)
                     || !(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    if (rxq->queue_id >= dev->real_n_rxq) {
        return EOPNOTSUPP;
    }

    nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    dev->dpdk_mp->mp,
                                    (struct rte_mbuf **)packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer, (struct rte_mbuf **)packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, packets, nb_rx, dropped);
    rte_spinlock_unlock(&dev->stats_lock);

    *c = nb_rx;
    return 0;
}
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
                     int *c)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    int nb_rx;
    int dropped = 0;

    /* There is only one tx queue for this core.  Do not flush other
     * queues.
     * Do not flush tx queue which is shared among CPUs
     * since it is always flushed */
    if (rxq->queue_id == rte_lcore_id() &&
        OVS_LIKELY(!dev->txq_needs_locking)) {
        dpdk_queue_flush(dev, rxq->queue_id);
    }

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer, (struct rte_mbuf **) packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    /* Update stats to reflect dropped packets */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    *c = nb_rx;

    return 0;
}
static inline int
netdev_dpdk_qos_run__(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                      int cnt)
{
    struct netdev *netdev = &dev->up;

    if (dev->qos_conf != NULL) {
        rte_spinlock_lock(&dev->qos_lock);
        if (dev->qos_conf != NULL) {
            cnt = dev->qos_conf->ops->qos_run(netdev, pkts, cnt);
        }
        rte_spinlock_unlock(&dev->qos_lock);
    }

    return cnt;
}
*stats
,
1352 struct dp_packet
**packets
,
1357 int sent
= attempted
- dropped
;
1359 stats
->tx_packets
+= sent
;
1360 stats
->tx_dropped
+= dropped
;
1362 for (i
= 0; i
< sent
; i
++) {
1363 stats
->tx_bytes
+= dp_packet_size(packets
[i
]);
1368 __netdev_dpdk_vhost_send(struct netdev
*netdev
, int qid
,
1369 struct dp_packet
**pkts
, int cnt
,
1372 struct netdev_dpdk
*dev
= netdev_dpdk_cast(netdev
);
1373 struct virtio_net
*virtio_dev
= netdev_dpdk_get_virtio(dev
);
1374 struct rte_mbuf
**cur_pkts
= (struct rte_mbuf
**) pkts
;
1375 unsigned int total_pkts
= cnt
;
1376 unsigned int qos_pkts
= cnt
;
1379 qid
= dev
->tx_q
[qid
% dev
->real_n_txq
].map
;
1381 if (OVS_UNLIKELY(!is_vhost_running(virtio_dev
) || qid
< 0
1382 || !(dev
->flags
& NETDEV_UP
))) {
1383 rte_spinlock_lock(&dev
->stats_lock
);
1384 dev
->stats
.tx_dropped
+= cnt
;
1385 rte_spinlock_unlock(&dev
->stats_lock
);
1389 rte_spinlock_lock(&dev
->tx_q
[qid
].tx_lock
);
1391 /* Check has QoS has been configured for the netdev */
1392 cnt
= netdev_dpdk_qos_run__(dev
, cur_pkts
, cnt
);
1396 int vhost_qid
= qid
* VIRTIO_QNUM
+ VIRTIO_RXQ
;
1397 unsigned int tx_pkts
;
1399 tx_pkts
= rte_vhost_enqueue_burst(virtio_dev
, vhost_qid
,
1401 if (OVS_LIKELY(tx_pkts
)) {
1402 /* Packets have been sent.*/
1404 /* Prepare for possible retry.*/
1405 cur_pkts
= &cur_pkts
[tx_pkts
];
1407 /* No packets sent - do not retry.*/
1410 } while (cnt
&& (retries
++ < VHOST_ENQ_RETRY_NUM
));
1412 rte_spinlock_unlock(&dev
->tx_q
[qid
].tx_lock
);
1414 rte_spinlock_lock(&dev
->stats_lock
);
1416 netdev_dpdk_vhost_update_tx_counters(&dev
->stats
, pkts
, total_pkts
, cnt
);
1417 rte_spinlock_unlock(&dev
->stats_lock
);
1423 for (i
= 0; i
< total_pkts
; i
++) {
1424 dp_packet_delete(pkts
[i
]);
inline static void
dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
                struct rte_mbuf **pkts, int cnt)
{
    struct dpdk_tx_queue *txq = &dev->tx_q[qid];
    uint64_t diff_tsc;

    int i = 0;

    while (i < cnt) {
        int freeslots = MAX_TX_QUEUE_LEN - txq->count;
        int tocopy = MIN(freeslots, cnt - i);

        memcpy(&txq->burst_pkts[txq->count], &pkts[i],
               tocopy * sizeof (struct rte_mbuf *));

        txq->count += tocopy;
        i += tocopy;

        if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
            dpdk_queue_flush__(dev, qid);
        }
        diff_tsc = rte_get_timer_cycles() - txq->tsc;
        if (diff_tsc >= DRAIN_TSC) {
            dpdk_queue_flush__(dev, qid);
        }
    }
}
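/* Note (added): a queued burst is therefore flushed as soon as the queue
 * fills up or 'flush_tx' is set, and otherwise at the latest DRAIN_TSC
 * cycles after the previous flush. */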
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
                int cnt)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = cnt;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *mbufs[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    /* If we are on a non pmd thread we have to use the mempool mutex, because
     * every non pmd thread shares the same mempool cache */

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_lock(&nonpmd_mempool_mutex);
    }

    for (i = 0; i < cnt; i++) {
        int size = dp_packet_size(pkts[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);

            dropped++;
            continue;
        }

        mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!mbufs[newcnt]) {
            dropped += cnt - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
               dp_packet_data(pkts[i]), size);

        rte_pktmbuf_data_len(mbufs[newcnt]) = size;
        rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;

        newcnt++;
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
                                 newcnt, true);
    } else {
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);

        dropped += qos_pkts - newcnt;
        dpdk_queue_pkts(dev, qid, mbufs, newcnt);
        dpdk_queue_flush(dev, qid);
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    if (!dpdk_thread_is_pmd()) {
        ovs_mutex_unlock(&nonpmd_mempool_mutex);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
                       int cnt, bool may_steal)
{
    if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
        int i;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);
        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet **pkts, int cnt, bool may_steal)
{
    int i;

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        qid = qid % dev->real_n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     pkts[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, pkts, cnt);

        if (may_steal) {
            for (i = 0; i < cnt; i++) {
                dp_packet_delete(pkts[i]);
            }
        }
    } else {
        int next_tx_idx = 0;
        int dropped = 0;
        unsigned int qos_pkts = 0;
        unsigned int temp_cnt = 0;

        for (i = 0; i < cnt; i++) {
            int size = dp_packet_size(pkts[i]);

            if (OVS_UNLIKELY(size > dev->max_packet_len)) {
                if (next_tx_idx != i) {
                    temp_cnt = i - next_tx_idx;
                    qos_pkts = temp_cnt;

                    temp_cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf **)pkts,
                                                     temp_cnt);
                    dropped += qos_pkts - temp_cnt;
                    dpdk_queue_pkts(dev, qid,
                                    (struct rte_mbuf **)&pkts[next_tx_idx],
                                    temp_cnt);
                }

                VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                             (int) size, dev->max_packet_len);

                dp_packet_delete(pkts[i]);
                dropped++;
                next_tx_idx = i + 1;
            }
        }
        if (next_tx_idx != cnt) {
            cnt -= next_tx_idx;
            qos_pkts = cnt;

            cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf **)pkts, cnt);
            dropped += qos_pkts - cnt;
            dpdk_queue_pkts(dev, qid, (struct rte_mbuf **)&pkts[next_tx_idx],
                            cnt);
        }

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(dev->txq_needs_locking)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int old_mtu, err, dpdk_mtu;
    struct dpdk_mp *old_mp;
    struct dpdk_mp *mp;
    uint32_t buf_size;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);
    if (dev->mtu == mtu) {
        err = 0;
        goto out;
    }

    buf_size = dpdk_buf_size(mtu);
    dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);

    mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);
    if (!mp) {
        err = ENOMEM;
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    old_mtu = dev->mtu;
    old_mp = dev->dpdk_mp;
    dev->dpdk_mp = mp;
    dev->mtu = mtu;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);

    err = dpdk_eth_dev_init(dev);
    if (err) {
        dpdk_mp_put(mp);
        dev->mtu = old_mtu;
        dev->dpdk_mp = old_mp;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
        dpdk_eth_dev_init(dev);
        goto out;
    }

    dpdk_mp_put(old_mp);
    netdev_change_seq_changed(netdev);
out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstats *xstats,
                           const unsigned int size)
{
    /* XXX Current implementation is simple search through an array
     * to find hardcoded counter names. In future DPDK release (TBD)
     * XSTATS API will change so each counter will be represented by
     * unique ID instead of String. */

    for (unsigned int i = 0; i < size; i++) {
        if (strcmp(XSTAT_RX_64_PACKETS, xstats[i].name) == 0) {
            stats->rx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, xstats[i].name) == 0) {
            stats->rx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, xstats[i].name) == 0) {
            stats->rx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, xstats[i].name) == 0) {
            stats->rx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS,
                          xstats[i].name) == 0) {
            stats->rx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_64_PACKETS, xstats[i].name) == 0) {
            stats->tx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, xstats[i].name) == 0) {
            stats->tx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, xstats[i].name) == 0) {
            stats->tx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, xstats[i].name) == 0) {
            stats->tx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS,
                          xstats[i].name) == 0) {
            stats->tx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, xstats[i].name) == 0) {
            stats->tx_multicast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, xstats[i].name) == 0) {
            stats->rx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, xstats[i].name) == 0) {
            stats->tx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, xstats[i].name) == 0) {
            stats->rx_undersized_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, xstats[i].name) == 0) {
            stats->rx_fragmented_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, xstats[i].name) == 0) {
            stats->rx_jabber_errors = xstats[i].value;
        }
    }
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstats *rte_xstats;
    int rte_xstats_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    rte_xstats_len = rte_eth_xstats_get(dev->port_id, NULL, 0);
    if (rte_xstats_len > 0) {
        rte_xstats = dpdk_rte_mzalloc(sizeof(*rte_xstats) * rte_xstats_len);
        memset(rte_xstats, 0xff, sizeof(*rte_xstats) * rte_xstats_len);
        rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                            rte_xstats_len);
        if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
            netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_ret);
        }
        rte_free(rte_xstats);
    } else {
        VLOG_WARN("Can't get XSTATS counters for port: %i.", dev->port_id);
    }

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;
    stats->multicast = rte_stats.imcasts;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
    }

    return 0;
}
static struct ingress_policer *
netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
{
    struct ingress_policer *policer = NULL;
    uint64_t rate_bytes;
    uint64_t burst_bytes;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    rte_spinlock_init(&policer->policer_lock);

    /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
    rate_bytes = rate * 1000/8;
    burst_bytes = burst * 1000/8;
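    /* Illustrative example (not in the original source): a rate of
     * 10000 kbits/s converts to 10000 * 1000 / 8 = 1250000 bytes/s for the
     * srTCM committed information rate. */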
    policer->app_srtcm_params.cir = rate_bytes;
    policer->app_srtcm_params.cbs = burst_bytes;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->in_policer,
                                 &policer->app_srtcm_params);
    if (err) {
        VLOG_ERR("Could not create rte meter for ingress policer");
        free(policer);
        return NULL;
    }

    return policer;
}
static int
netdev_dpdk_set_policing(struct netdev *netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;

    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value.
     */
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
                     : policer_burst);
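    /* Examples (added): policer_rate == 0 forces the burst to 0 (policing
     * disabled); policer_rate == 1000 with policer_burst == 0 selects the
     * 8000 kbit default; any other combination keeps the caller's burst. */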
    ovs_mutex_lock(&dev->mutex);

    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);

    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    }

    /* Destroy any existing ingress policer for the device if one exists */
    if (policer) {
        ovsrcu_postpone(free, policer);
    }

    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    } else {
        policer = NULL;
    }
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(virtio_dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return EOPNOTSUPP;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }
    } else {
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
         * update. */
        struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);

        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(virtio_dev)) {
            netdev_change_seq_changed(&dev->up);

            /* Clear statistics if device is getting up. */
            if (NETDEV_UP & on) {
                rte_spinlock_lock(&dev->stats_lock);
                memset(&dev->stats, 0, sizeof(dev->stats));
                rte_spinlock_unlock(&dev->stats_lock);
            }
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;

    if (dev->port_id < 0)
        return ENODEV;

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(struct virtio_net *virtio_dev)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < virtio_dev->virt_qp_nb; i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_TXQ, 0);
    }
}
/*
 * Fixes mapping for vhost-user tx queues. Must be called after each
 * enabling/disabling of queues and real_n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->real_n_txq;

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    }

    rte_free(enabled_queues);
}
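
/*
 * Worked example (values are illustrative, not taken from a running
 * system): with real_n_txq == 4 and only queues 0 and 2 enabled, i.e.
 * mapped to themselves, the second loop spreads the remaining qids
 * round-robin over the enabled ones:
 *
 *   map before: { 0, ?, 2, ? }    ('?' is an unmapped/disabled entry)
 *   map after:  { 0, 0, 2, 2 }
 *
 * so every tx qid used by the datapath resolves to an enabled queue.
 */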
static int
netdev_dpdk_vhost_set_queues(struct netdev_dpdk *dev,
                             struct virtio_net *virtio_dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t qp_num;

    qp_num = virtio_dev->virt_qp_nb;
    if (qp_num > dev->up.n_rxq) {
        VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
                 "too many queues %d > %d", virtio_dev->ifname,
                 virtio_dev->device_fh, qp_num, dev->up.n_rxq);
        return -1;
    }

    dev->real_n_rxq = qp_num;
    dev->real_n_txq = qp_num;
    dev->txq_needs_locking = true;
    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    netdev_dpdk_remap_txqs(dev);

    return 0;
}
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(struct virtio_net *virtio_dev)
{
    struct netdev_dpdk *dev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&dev->mutex);
            if (netdev_dpdk_vhost_set_queues(dev, virtio_dev)) {
                ovs_mutex_unlock(&dev->mutex);
                ovs_mutex_unlock(&dpdk_mutex);
                return -1;
            }
            ovsrcu_set(&dev->virtio_dev, virtio_dev);
            exists = true;
            virtio_dev->flags |= VIRTIO_DEV_RUNNING;
            /* Disable notifications. */
            set_irq_status(virtio_dev);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
                  "found", virtio_dev->ifname, virtio_dev->device_fh);

        return -1;
    }

    VLOG_INFO("vHost Device '%s' %"PRIu64" has been added",
              virtio_dev->ifname, virtio_dev->device_fh);

    return 0;
}
/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->real_n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(volatile struct virtio_net *virtio_dev)
{
    struct netdev_dpdk *dev;
    bool exists = false;

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_virtio(dev) == virtio_dev) {

            ovs_mutex_lock(&dev->mutex);
            virtio_dev->flags &= ~VIRTIO_DEV_RUNNING;
            ovsrcu_set(&dev->virtio_dev, NULL);
            netdev_dpdk_txq_map_clear(dev);
            exists = true;

            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();
        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed",
                  virtio_dev->ifname, virtio_dev->device_fh);
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
                  virtio_dev->device_fh);
    }
}
static int
vring_state_changed(struct virtio_net *virtio_dev, uint16_t queue_id,
                    int enable)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            ovs_mutex_lock(&dev->mutex);
            if (enable) {
                dev->tx_q[qid].map = qid;
            } else {
                dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
            }
            netdev_dpdk_remap_txqs(dev);
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' %"
                  PRIu64" changed to \'%s\'", queue_id, qid,
                  virtio_dev->ifname, virtio_dev->device_fh,
                  (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
                  virtio_dev->device_fh);
        return -1;
    }

    return 0;
}
struct virtio_net *
netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
}

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
}
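
/*
 * Reader-side sketch (illustrative only): packet-path code is expected to
 * fetch the RCU-protected pointer once via the accessor above and then
 * check it, e.g.
 *
 *   struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
 *
 *   if (!is_vhost_running(virtio_dev)) {
 *       return EAGAIN;
 *   }
 *
 * destroy_device() pairs with this pattern by storing NULL and calling
 * ovsrcu_synchronize() before the virtio device is torn down.
 */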
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed
};

static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the vhost thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}

static int
dpdk_vhost_class_init(void)
{
    rte_vhost_driver_callback_register(&virtio_net_device_ops);
    rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
                            | 1ULL << VIRTIO_NET_F_HOST_TSO6
                            | 1ULL << VIRTIO_NET_F_CSUM);

    ovs_thread_create("vhost_thread", start_vhost_loop, NULL);
    return 0;
}
static int
dpdk_vhost_cuse_class_init(void)
{
    return 0;
}

static int
dpdk_vhost_user_class_init(void)
{
    return 0;
}
static void
dpdk_common_init(void)
{
    unixctl_command_register("netdev-dpdk/set-admin-state",
                             "[netdev] up|down", 1, 2,
                             netdev_dpdk_set_admin_state, NULL);
}
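
/*
 * Example invocations of the command registered above (the port name
 * "dpdk0" is illustrative):
 *
 *   ovs-appctl netdev-dpdk/set-admin-state dpdk0 down
 *   ovs-appctl netdev-dpdk/set-admin-state up
 *
 * With a netdev argument only that port is changed; without one, the new
 * state is applied to every DPDK port.
 */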
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char ring_name[RTE_RING_NAMESIZE];
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (ivshmem == NULL) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    err = snprintf(ring_name, sizeof(ring_name), "%s_tx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = snprintf(ring_name, sizeof(ring_name), "%s_rx", dev_name);
    if (err < 0) {
        return -err;
    }

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr" */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            *eth_port_id = ivshmem->eth_port_id; /* really all that is needed */
            return 0;
        }
    }
    /* Need to create the device rings */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
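
/*
 * Ring devices are created on first open.  A hypothetical example: adding
 * a port named "dpdkr0" with
 *
 *   ovs-vsctl add-port br0 dpdkr0 -- set Interface dpdkr0 type=dpdkr
 *
 * parses the trailing "0" as the user port number and, if no matching
 * entry exists in dpdk_ring_list, allocates the rx/tx rings through
 * dpdk_ring_create() above.
 */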
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
                      struct dp_packet **pkts, int cnt, bool may_steal)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and returned to the datapath
     * without the RSS hash being recalculated. */
    for (i = 0; i < cnt; i++) {
        dp_packet_rss_invalidate(pkts[i]);
    }

    netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    if (rte_eal_init_ret) {
        return rte_eal_init_ret;
    }

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
}
/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations' qos_name to name.  Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match is found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }

    return NULL;
}
/*
 * Call qos_destruct to clean up items associated with the netdev's
 * qos_conf.  Set the netdev's qos_conf to NULL.
 */
static void
qos_delete_conf(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_spinlock_lock(&dev->qos_lock);
    if (dev->qos_conf) {
        if (dev->qos_conf->ops->qos_destruct) {
            dev->qos_conf->ops->qos_destruct(netdev, dev->qos_conf);
        }
        dev->qos_conf = NULL;
    }
    rte_spinlock_unlock(&dev->qos_lock);
}
static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }

    return 0;
}
static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    if (dev->qos_conf) {
        *typep = dev->qos_conf->ops->qos_name;
        error = (dev->qos_conf->ops->qos_get
                 ? dev->qos_conf->ops->qos_get(netdev, details) : 0);
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_set_qos(struct netdev *netdev,
                    const char *type, const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    int error = 0;

    /* If type is empty or unsupported then the current QoS configuration
     * for the dpdk-netdev can be destroyed. */
    new_ops = qos_lookup_name(type);

    if (type[0] == '\0' || !new_ops || !new_ops->qos_construct) {
        qos_delete_conf(netdev);
        return EOPNOTSUPP;
    }

    ovs_mutex_lock(&dev->mutex);

    if (dev->qos_conf) {
        if (new_ops == dev->qos_conf->ops) {
            error = new_ops->qos_set ? new_ops->qos_set(netdev, details) : 0;
        } else {
            /* Delete existing QoS configuration. */
            qos_delete_conf(netdev);
            ovs_assert(dev->qos_conf == NULL);

            /* Install new QoS configuration. */
            error = new_ops->qos_construct(netdev, details);
            ovs_assert((error == 0) == (dev->qos_conf != NULL));
        }
    } else {
        error = new_ops->qos_construct(netdev, details);
        ovs_assert((error == 0) == (dev->qos_conf != NULL));
    }

    ovs_mutex_unlock(&dev->mutex);
    return error;
}
/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};

static struct egress_policer *
egress_policer_get__(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return CONTAINER_OF(dev->qos_conf, struct egress_policer, qos_conf);
}
static int
egress_policer_qos_construct(struct netdev *netdev,
                             const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    rte_spinlock_lock(&dev->qos_lock);
    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    dev->qos_conf = &policer->qos_conf;
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    rte_spinlock_unlock(&dev->qos_lock);

    return err;
}
static void
egress_policer_qos_destruct(struct netdev *netdev OVS_UNUSED,
                            struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}
static int
egress_policer_qos_get(const struct netdev *netdev, struct smap *details)
{
    struct egress_policer *policer = egress_policer_get__(netdev);

    smap_add_format(details, "cir", "%llu",
                    1ULL * policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%llu",
                    1ULL * policer->app_srtcm_params.cbs);

    return 0;
}
static int
egress_policer_qos_set(struct netdev *netdev, const struct smap *details)
{
    struct egress_policer *policer;
    const char *cir_s;
    const char *cbs_s;
    int err = 0;

    policer = egress_policer_get__(netdev);
    cir_s = smap_get(details, "cir");
    cbs_s = smap_get(details, "cbs");
    policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
    policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);

    return err;
}
static int
egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts, int pkt_cnt)
{
    int cnt = 0;
    struct egress_policer *policer = egress_policer_get__(netdev);

    cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);

    return cnt;
}
static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_set,
    egress_policer_run
};
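
/*
 * Configuration sketch for the QoS type registered above (the numeric
 * values are arbitrary examples): the "cir" and "cbs" keys read by
 * egress_policer_qos_construct()/_set() come from a QoS row in the
 * database, e.g.
 *
 *   ovs-vsctl set port dpdk0 qos=@qos -- \
 *       --id=@qos create qos type=egress-policer \
 *       other-config:cir=46000000 other-config:cbs=2048
 *
 * Following the rte_meter srTCM model, cir is in bytes per second and cbs
 * in bytes; ebs is hard-coded to zero here.
 */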
static int
netdev_dpdk_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    if (netdev->n_txq == dev->requested_n_txq
        && netdev->n_rxq == dev->requested_n_rxq) {
        /* Reconfiguration is unnecessary */
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    netdev->n_txq = dev->requested_n_txq;
    netdev->n_rxq = dev->requested_n_rxq;

    rte_free(dev->tx_q);
    err = dpdk_eth_dev_init(dev);
    netdev_dpdk_alloc_txq(dev, dev->real_n_txq);

    dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;

out:
    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return err;
}
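
/*
 * Reconfiguration is driven by the datapath: when requested_n_rxq/_txq
 * diverge from the active values (for instance after a queue-count change
 * such as "ovs-vsctl set Interface dpdk0 options:n_rxq=2"; the exact knob
 * depends on the release in use), dpif-netdev calls back into this
 * function, which stops the port, reallocates the tx queue array, and
 * reinitializes the device.
 */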
static int
netdev_dpdk_vhost_user_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    netdev->n_txq = dev->requested_n_txq;
    netdev->n_rxq = dev->requested_n_rxq;

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return 0;
}
static int
netdev_dpdk_vhost_cuse_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    netdev->n_txq = dev->requested_n_txq;
    dev->real_n_txq = 1;
    netdev->n_rxq = 1;

    dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    return 0;
}
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, SEND, \
                          GET_CARRIER, GET_STATS, GET_FEATURES,  \
                          GET_STATUS, RECONFIGURE, RXQ_RECV)     \
{                                                                \
    NAME,                                                        \
    true,                       /* is_pmd */                     \
    INIT,                       /* init */                       \
    NULL,                       /* netdev_dpdk_run */            \
    NULL,                       /* netdev_dpdk_wait */           \
                                                                 \
    netdev_dpdk_alloc,                                           \
    CONSTRUCT,                                                   \
    DESTRUCT,                                                    \
    netdev_dpdk_dealloc,                                         \
    netdev_dpdk_get_config,                                      \
    netdev_dpdk_set_config,                                      \
    NULL,                       /* get_tunnel_config */          \
    NULL,                       /* build header */               \
    NULL,                       /* push header */                \
    NULL,                       /* pop header */                 \
    netdev_dpdk_get_numa_id,    /* get_numa_id */                \
    netdev_dpdk_set_tx_multiq,                                   \
                                                                 \
    SEND,                       /* send */                       \
    NULL,                       /* send_wait */                  \
                                                                 \
    netdev_dpdk_set_etheraddr,                                   \
    netdev_dpdk_get_etheraddr,                                   \
    netdev_dpdk_get_mtu,                                         \
    netdev_dpdk_set_mtu,                                         \
    netdev_dpdk_get_ifindex,                                     \
    GET_CARRIER,                                                 \
    netdev_dpdk_get_carrier_resets,                              \
    netdev_dpdk_set_miimon,                                      \
    GET_STATS,                                                   \
    GET_FEATURES,                                                \
    NULL,                       /* set_advertisements */         \
                                                                 \
    netdev_dpdk_set_policing,                                    \
    netdev_dpdk_get_qos_types,                                   \
    NULL,                       /* get_qos_capabilities */       \
    netdev_dpdk_get_qos,                                         \
    netdev_dpdk_set_qos,                                         \
    NULL,                       /* get_queue */                  \
    NULL,                       /* set_queue */                  \
    NULL,                       /* delete_queue */               \
    NULL,                       /* get_queue_stats */            \
    NULL,                       /* queue_dump_start */           \
    NULL,                       /* queue_dump_next */            \
    NULL,                       /* queue_dump_done */            \
    NULL,                       /* dump_queue_stats */           \
                                                                 \
    NULL,                       /* set_in4 */                    \
    NULL,                       /* get_addr_list */              \
    NULL,                       /* add_router */                 \
    NULL,                       /* get_next_hop */               \
    GET_STATUS,                                                  \
    NULL,                       /* arp_lookup */                 \
                                                                 \
    netdev_dpdk_update_flags,                                    \
    RECONFIGURE,                                                 \
                                                                 \
    netdev_dpdk_rxq_alloc,                                       \
    netdev_dpdk_rxq_construct,                                   \
    netdev_dpdk_rxq_destruct,                                    \
    netdev_dpdk_rxq_dealloc,                                     \
    RXQ_RECV,                                                    \
    NULL,                       /* rx_wait */                    \
    NULL,                       /* rxq_drain */                  \
}
static bool
process_vhost_flags(char *flag, char *default_val, int size,
                    const struct smap *ovs_other_config,
                    char **new_val)
{
    const char *val;
    int changed = 0;

    val = smap_get(ovs_other_config, flag);

    /* Process the vhost-specific flag if it is provided; otherwise fall back
     * to the default value.  Which flag applies depends on the version of
     * vhost in use. */
    if (val && (strlen(val) <= size)) {
        changed = 1;
        *new_val = xstrdup(val);
        VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
    } else {
        VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
        *new_val = default_val;
    }

    return changed;
}
static char **
grow_argv(char ***argv, size_t cur_siz, size_t grow_by)
{
    return xrealloc(*argv, sizeof(char *) * (cur_siz + grow_by));
}
static void
dpdk_option_extend(char ***argv, int argc, const char *option,
                   const char *value)
{
    char **newargv = grow_argv(argv, argc, 2);
    *argv = newargv;
    newargv[argc] = xstrdup(option);
    newargv[argc+1] = xstrdup(value);
}
static char **
move_argv(char ***argv, size_t cur_size, char **src_argv, size_t src_argc)
{
    char **newargv = grow_argv(argv, cur_size, src_argc);
    while (src_argc--) {
        newargv[cur_size+src_argc] = src_argv[src_argc];
        src_argv[src_argc] = NULL;
    }
    return newargv;
}
static int
extra_dpdk_args(const char *ovs_extra_config, char ***argv, int argc)
{
    int ret = argc;
    char *release_tok = xstrdup(ovs_extra_config);
    char *tok = release_tok, *endptr = NULL;

    for (tok = strtok_r(release_tok, " ", &endptr); tok != NULL;
         tok = strtok_r(NULL, " ", &endptr)) {
        char **newarg = grow_argv(argv, ret, 1);
        *argv = newarg;
        newarg[ret++] = xstrdup(tok);
    }

    free(release_tok);
    return ret;
}
static bool
argv_contains(char **argv_haystack, const size_t argc_haystack,
              const char *needle)
{
    for (size_t i = 0; i < argc_haystack; ++i) {
        if (!strcmp(argv_haystack[i], needle)) {
            return true;
        }
    }

    return false;
}
static int
construct_dpdk_options(const struct smap *ovs_other_config,
                       char ***argv, const int initial_size,
                       char **extra_args, const size_t extra_argc)
{
    struct dpdk_options_map {
        const char *ovs_configuration;
        const char *dpdk_option;
        bool default_enabled;
        const char *default_value;
    } opts[] = {
        {"dpdk-lcore-mask", "-c", false, NULL},
        {"dpdk-hugepage-dir", "--huge-dir", false, NULL},
    };

    int i, ret = initial_size;

    /* First, construct from the flat options (non-mutex). */
    for (i = 0; i < ARRAY_SIZE(opts); ++i) {
        const char *lookup = smap_get(ovs_other_config,
                                      opts[i].ovs_configuration);
        if (!lookup && opts[i].default_enabled) {
            lookup = opts[i].default_value;
        }

        if (lookup) {
            if (!argv_contains(extra_args, extra_argc, opts[i].dpdk_option)) {
                dpdk_option_extend(argv, ret, opts[i].dpdk_option, lookup);
                ret += 2;
            } else {
                VLOG_WARN("Ignoring database defined option '%s' due to "
                          "dpdk_extras config", opts[i].dpdk_option);
            }
        }
    }

    return ret;
}
#define MAX_DPDK_EXCL_OPTS 10

static int
construct_dpdk_mutex_options(const struct smap *ovs_other_config,
                             char ***argv, const int initial_size,
                             char **extra_args, const size_t extra_argc)
{
    struct dpdk_exclusive_options_map {
        const char *category;
        const char *ovs_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *eal_dpdk_options[MAX_DPDK_EXCL_OPTS];
        const char *default_value;
        int default_option;
    } excl_opts[] = {
        {"memory type",
         {"dpdk-alloc-mem", "dpdk-socket-mem", NULL,},
         {"-m",             "--socket-mem",    NULL,},
         "1024,0", 1
        },
    };

    int i, ret = initial_size;
    for (i = 0; i < ARRAY_SIZE(excl_opts); ++i) {
        int found_opts = 0, scan, found_pos = -1;
        const char *found_value;
        struct dpdk_exclusive_options_map *popt = &excl_opts[i];

        for (scan = 0; scan < MAX_DPDK_EXCL_OPTS
                 && popt->ovs_dpdk_options[scan]; ++scan) {
            const char *lookup = smap_get(ovs_other_config,
                                          popt->ovs_dpdk_options[scan]);
            if (lookup && strlen(lookup)) {
                found_opts++;
                found_pos = scan;
                found_value = lookup;
            }
        }

        if (!found_opts) {
            if (popt->default_option) {
                found_pos = popt->default_option;
                found_value = popt->default_value;
            } else {
                continue;
            }
        }

        if (found_opts > 1) {
            VLOG_ERR("Multiple defined options for %s. Please check your"
                     " database settings and reconfigure if necessary.",
                     popt->category);
        }

        if (!argv_contains(extra_args, extra_argc,
                           popt->eal_dpdk_options[found_pos])) {
            dpdk_option_extend(argv, ret, popt->eal_dpdk_options[found_pos],
                               found_value);
            ret += 2;
        } else {
            VLOG_WARN("Ignoring database defined option '%s' due to "
                      "dpdk_extras config", popt->eal_dpdk_options[found_pos]);
        }
    }

    return ret;
}
static int
get_dpdk_args(const struct smap *ovs_other_config, char ***argv,
              int argc)
{
    const char *extra_configuration;
    char **extra_args = NULL;
    int i;
    size_t extra_argc = 0;

    extra_configuration = smap_get(ovs_other_config, "dpdk-extra");
    if (extra_configuration) {
        extra_argc = extra_dpdk_args(extra_configuration, &extra_args, 0);
    }

    i = construct_dpdk_options(ovs_other_config, argv, argc, extra_args,
                               extra_argc);
    i = construct_dpdk_mutex_options(ovs_other_config, argv, i, extra_args,
                                     extra_argc);

    if (extra_configuration) {
        *argv = move_argv(argv, i, extra_args, extra_argc);
    }

    return i + extra_argc;
}
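
/*
 * End-to-end sketch of the argv construction (values are only an example):
 * with
 *
 *   ovs-vsctl set Open_vSwitch . other_config:dpdk-lcore-mask=0x2
 *   ovs-vsctl set Open_vSwitch . other_config:dpdk-socket-mem=1024,0
 *
 * the vector handed to rte_eal_init() comes out roughly as
 *
 *   ovs-vswitchd -c 0x2 --socket-mem 1024,0
 *
 * Any "dpdk-extra" tokens are appended verbatim and take precedence over
 * conflicting database options.
 */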
static char **dpdk_argv;
static int dpdk_argc;

static void
deferred_argv_release(void)
{
    int result;

    for (result = 0; result < dpdk_argc; ++result) {
        free(dpdk_argv[result]);
    }

    free(dpdk_argv);
}
static void
dpdk_init__(const struct smap *ovs_other_config)
{
    char **argv = NULL;
    int result;
    int argc, argc_tmp;
    bool auto_determine = true;
    int err = 0;
    cpu_set_t cpuset;
    char *sock_dir_subcomponent;

    if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
        VLOG_INFO("DPDK Disabled - to change this requires a restart.");
        return;
    }

    VLOG_INFO("DPDK Enabled, initializing");

#ifdef VHOST_CUSE
    if (process_vhost_flags("cuse-dev-name", xstrdup("vhost-net"),
                            PATH_MAX, ovs_other_config, &cuse_dev_name)) {
    }
#else
    if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
                            NAME_MAX, ovs_other_config,
                            &sock_dir_subcomponent)) {
        struct stat s;

        if (!strstr(sock_dir_subcomponent, "..")) {
            vhost_sock_dir = xasprintf("%s/%s", ovs_rundir(),
                                       sock_dir_subcomponent);

            err = stat(vhost_sock_dir, &s);
            if (err) {
                VLOG_ERR("vhost-user sock directory '%s' does not exist.",
                         vhost_sock_dir);
            }
        } else {
            vhost_sock_dir = xstrdup(ovs_rundir());
            VLOG_ERR("vhost-user sock directory request '%s/%s' has invalid "
                     "characters '..' - using %s instead.",
                     ovs_rundir(), sock_dir_subcomponent, ovs_rundir());
        }
        free(sock_dir_subcomponent);
    } else {
        vhost_sock_dir = sock_dir_subcomponent;
    }
#endif

    argv = grow_argv(&argv, 0, 1);
    argc = 1;
    argv[0] = xstrdup(ovs_get_program_name());
    argc_tmp = get_dpdk_args(ovs_other_config, &argv, argc);

    while (argc_tmp != argc) {
        if (!strcmp("-c", argv[argc]) || !strcmp("-l", argv[argc])) {
            auto_determine = false;
        }
        argc++;
    }

    /**
     * NOTE: This is an unsophisticated mechanism for determining the DPDK
     * lcore for the DPDK Master.
     */
    if (auto_determine) {
        int i;

        /* Get the main thread affinity */
        CPU_ZERO(&cpuset);
        err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (!err) {
            for (i = 0; i < CPU_SETSIZE; i++) {
                if (CPU_ISSET(i, &cpuset)) {
                    argv = grow_argv(&argv, argc, 2);
                    argv[argc++] = xstrdup("-c");
                    argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
                    i = CPU_SETSIZE;
                }
            }
        } else {
            VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
            /* User did not set dpdk-lcore-mask and we are unable to get the
             * current thread affinity - default to core 0x1 */
            argv = grow_argv(&argv, argc, 2);
            argv[argc++] = xstrdup("-c");
            argv[argc++] = xasprintf("0x%X", 1);
        }
    }

    argv = grow_argv(&argv, argc, 1);
    argv[argc] = NULL;

    optind = 1;

    if (VLOG_IS_INFO_ENABLED()) {
        struct ds eal_args;
        int opt;

        ds_init(&eal_args);
        ds_put_cstr(&eal_args, "EAL ARGS:");
        for (opt = 0; opt < argc; ++opt) {
            ds_put_cstr(&eal_args, " ");
            ds_put_cstr(&eal_args, argv[opt]);
        }
        VLOG_INFO("%s", ds_cstr_ro(&eal_args));
        ds_destroy(&eal_args);
    }

    /* Make sure things are initialized ... */
    result = rte_eal_init(argc, argv);
    if (result < 0) {
        ovs_abort(result, "Cannot init EAL");
    }

    /* Set the main thread affinity back to pre rte_eal_init() value */
    if (auto_determine && !err) {
        err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (err) {
            VLOG_ERR("Thread setaffinity error %d", err);
        }
    }

    dpdk_argv = argv;
    dpdk_argc = argc;

    atexit(deferred_argv_release);

    rte_memzone_dump(stdout);
    rte_eal_init_ret = 0;

    /* We are called from the main thread here */
    RTE_PER_LCORE(_lcore_id) = NON_PMD_CORE_ID;

    ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);

#ifdef VHOST_CUSE
    /* Register CUSE device to handle IOCTLs.
     * Unless otherwise specified, cuse_dev_name is set to vhost-net.
     */
    err = rte_vhost_driver_register(cuse_dev_name);

    if (err != 0) {
        VLOG_ERR("CUSE device setup failure.");
        return;
    }
#endif

    dpdk_vhost_class_init();

    /* Finally, register the dpdk classes */
    netdev_dpdk_register();
}
void
dpdk_init(const struct smap *ovs_other_config)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    if (ovs_other_config && ovsthread_once_start(&once)) {
        dpdk_init__(ovs_other_config);
        ovsthread_once_done(&once);
    }
}
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        NULL,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        NULL,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostcuse",
        dpdk_vhost_cuse_class_init,
        netdev_dpdk_vhost_cuse_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_cuse_reconfigure,
        netdev_dpdk_vhost_rxq_recv);
static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        dpdk_vhost_user_class_init,
        netdev_dpdk_vhost_user_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_user_reconfigure,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    dpdk_common_init();
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_ring_class);
#ifdef VHOST_CUSE
    netdev_register_provider(&dpdk_vhost_cuse_class);
#else
    netdev_register_provider(&dpdk_vhost_user_class);
#endif
}
void
dpdk_set_lcore_id(unsigned cpu)
{
    /* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
    ovs_assert(cpu != NON_PMD_CORE_ID);
    RTE_PER_LCORE(_lcore_id) = cpu;
}

bool
dpdk_thread_is_pmd(void)
{
    return rte_lcore_id() != NON_PMD_CORE_ID;
}