/*
 * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "netdev-dpdk.h"

#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_eth_ring.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_meter.h>
#include <rte_virtio_net.h>

#include "dp-packet.h"
#include "dpif-netdev.h"
#include "fatal-signal.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/list.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/vlog.h"
#include "ovs-thread.h"
#include "openvswitch/shash.h"
#include "unaligned.h"
VLOG_DEFINE_THIS_MODULE(netdev_dpdk);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
#define DPDK_PORT_WATCHDOG_INTERVAL 5

#define OVS_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OVS_VPORT_DPDK "ovs_dpdk"
/*
 * need to reserve tons of extra space in the mbufs so we can align the
 * DMA addresses to 4KB.
 * The minimum mbuf size is limited to avoid scatter behaviour and drop in
 * performance for standard Ethernet MTU.
 */
#define ETHER_HDR_MAX_LEN           (ETHER_HDR_LEN + ETHER_CRC_LEN \
                                     + (2 * VLAN_HEADER_LEN))
#define MTU_TO_FRAME_LEN(mtu)       ((mtu) + ETHER_HDR_LEN + ETHER_CRC_LEN)
#define MTU_TO_MAX_FRAME_LEN(mtu)   ((mtu) + ETHER_HDR_MAX_LEN)
#define FRAME_LEN_TO_MTU(frame_len) ((frame_len)                    \
                                     - ETHER_HDR_LEN - ETHER_CRC_LEN)
#define MBUF_SIZE(mtu)              (MTU_TO_MAX_FRAME_LEN(mtu)      \
                                     + sizeof(struct dp_packet)    \
                                     + RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN      1024
#define NETDEV_DPDK_MAX_PKT_LEN     9728
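/* Worked example (illustrative note, not from the original source): for the
 * standard 1500-byte MTU, MTU_TO_MAX_FRAME_LEN(1500) = 1500 + 14 (Ethernet
 * header) + 4 (CRC) + 8 (two VLAN tags) = 1526 bytes; FRAME_LEN_TO_MTU()
 * inverts MTU_TO_FRAME_LEN() by subtracting the header and CRC back out. */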
/* Max and min number of packets in the mempool. OVS tries to allocate a
 * mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
 * enough hugepages) we keep halving the number until the allocation succeeds
 * or we reach MIN_NB_MBUF. */

#define MAX_NB_MBUF          (4096 * 64)
#define MIN_NB_MBUF          (4096 * 4)
#define MP_CACHE_SZ          RTE_MEMPOOL_CACHE_MAX_SIZE

/* MAX_NB_MBUF can be divided by 2 many times, until MIN_NB_MBUF. */
BUILD_ASSERT_DECL(MAX_NB_MBUF % ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF)
                  == 0);
/* The smallest possible NB_MBUF that we're going to try should be a multiple
 * of MP_CACHE_SZ. This is advised by DPDK documentation. */
BUILD_ASSERT_DECL((MAX_NB_MBUF / ROUND_DOWN_POW2(MAX_NB_MBUF / MIN_NB_MBUF))
                  % MP_CACHE_SZ == 0);
/*
 * DPDK XSTATS Counter names definition
 */
#define XSTAT_RX_64_PACKETS              "rx_size_64_packets"
#define XSTAT_RX_65_TO_127_PACKETS       "rx_size_65_to_127_packets"
#define XSTAT_RX_128_TO_255_PACKETS      "rx_size_128_to_255_packets"
#define XSTAT_RX_256_TO_511_PACKETS      "rx_size_256_to_511_packets"
#define XSTAT_RX_512_TO_1023_PACKETS     "rx_size_512_to_1023_packets"
#define XSTAT_RX_1024_TO_1522_PACKETS    "rx_size_1024_to_1522_packets"
#define XSTAT_RX_1523_TO_MAX_PACKETS     "rx_size_1523_to_max_packets"

#define XSTAT_TX_64_PACKETS              "tx_size_64_packets"
#define XSTAT_TX_65_TO_127_PACKETS       "tx_size_65_to_127_packets"
#define XSTAT_TX_128_TO_255_PACKETS      "tx_size_128_to_255_packets"
#define XSTAT_TX_256_TO_511_PACKETS      "tx_size_256_to_511_packets"
#define XSTAT_TX_512_TO_1023_PACKETS     "tx_size_512_to_1023_packets"
#define XSTAT_TX_1024_TO_1522_PACKETS    "tx_size_1024_to_1522_packets"
#define XSTAT_TX_1523_TO_MAX_PACKETS     "tx_size_1523_to_max_packets"

#define XSTAT_TX_MULTICAST_PACKETS       "tx_multicast_packets"
#define XSTAT_RX_BROADCAST_PACKETS       "rx_broadcast_packets"
#define XSTAT_TX_BROADCAST_PACKETS       "tx_broadcast_packets"
#define XSTAT_RX_UNDERSIZED_ERRORS       "rx_undersized_errors"
#define XSTAT_RX_OVERSIZE_ERRORS         "rx_oversize_errors"
#define XSTAT_RX_FRAGMENTED_ERRORS       "rx_fragmented_errors"
#define XSTAT_RX_JABBER_ERRORS           "rx_jabber_errors"
/* Default size of Physical NIC RXQ */
#define NIC_PORT_DEFAULT_RXQ_SIZE 2048
/* Default size of Physical NIC TXQ */
#define NIC_PORT_DEFAULT_TXQ_SIZE 2048
/* Maximum size of Physical NIC Queues */
#define NIC_PORT_MAX_Q_SIZE 4096

#define OVS_VHOST_MAX_QUEUE_NUM 1024  /* Maximum number of vHost TX queues. */
#define OVS_VHOST_QUEUE_MAP_UNKNOWN (-1) /* Mapping not initialized. */
#define OVS_VHOST_QUEUE_DISABLED    (-2) /* Queue was disabled by guest and not
                                          * yet mapped to another queue. */

#define VHOST_ENQ_RETRY_NUM 8
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
static const struct rte_eth_conf port_conf = {
    .rxmode = {
        .mq_mode = ETH_MQ_RX_RSS,
        .header_split   = 0, /* Header Split disabled */
        .hw_ip_checksum = 0, /* IP checksum offload disabled */
        .hw_vlan_filter = 0, /* VLAN filtering disabled */
        .jumbo_frame    = 0, /* Jumbo Frame Support disabled */
    },
    .rx_adv_conf = {
        .rss_conf = {
            .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        },
    },
    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
};
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };

enum dpdk_dev_type {
    DPDK_DEV_ETH = 0,
    DPDK_DEV_VHOST = 1,
};
/* Quality of Service */

/* An instance of a QoS configuration.  Always associated with a particular
 * network device.
 *
 * Each QoS implementation subclasses this with whatever additional data it
 * needs. */
struct qos_conf {
    const struct dpdk_qos_ops *ops;
    rte_spinlock_t lock;
};
/* A particular implementation of dpdk QoS operations.
 *
 * The functions below return 0 if successful or a positive errno value on
 * failure, except where otherwise noted. All of them must be provided, except
 * where otherwise noted. */
struct dpdk_qos_ops {

    /* Name of the QoS type */
    const char *qos_name;

    /* Called to construct a qos_conf object. The implementation should make
     * the appropriate calls to configure QoS according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * This function must return 0 if and only if it sets '*conf' to an
     * initialized 'struct qos_conf'.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_construct)(const struct smap *details, struct qos_conf **conf);

    /* Destroys the data structures allocated by the implementation as part of
     * 'conf'.
     *
     * For all QoS implementations it should always be non-null. */
    void (*qos_destruct)(struct qos_conf *conf);

    /* Retrieves details of 'conf' configuration into 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)). */
    int (*qos_get)(const struct qos_conf *conf, struct smap *details);

    /* Returns true if 'conf' is already configured according to 'details'.
     *
     * The contents of 'details' should be documented as valid for 'ovs_name'
     * in the "other_config" column in the "QoS" table in vswitchd/vswitch.xml
     * (which is built as ovs-vswitchd.conf.db(8)).
     *
     * For all QoS implementations it should always be non-null. */
    bool (*qos_is_equal)(const struct qos_conf *conf,
                         const struct smap *details);

    /* Modifies an array of rte_mbufs. The modification is specific to
     * each QoS implementation.
     *
     * The function should take an array of mbufs and an int representing
     * the current number of mbufs present in the array.
     *
     * After the function has performed a QoS modification to the array of
     * mbufs it returns an int representing the number of mbufs now present in
     * the array. This value can then be passed to the port send function
     * along with the modified array for transmission.
     *
     * For all QoS implementations it should always be non-null. */
    int (*qos_run)(struct qos_conf *qos_conf, struct rte_mbuf **pkts,
                   int pkt_cnt);
};
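/* Illustrative sketch (not part of the original file): a minimal 'qos_run'
 * that conforms to the contract above simply admits every packet unchanged:
 *
 *     static int
 *     noop_qos_run(struct qos_conf *conf OVS_UNUSED, struct rte_mbuf **pkts,
 *                  int pkt_cnt)
 *     {
 *         return pkt_cnt;
 *     }
 *
 * An implementation that drops packets must free the dropped mbufs itself
 * and leave the surviving ones packed at the front of 'pkts'. */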
/* dpdk_qos_ops for each type of user space QoS implementation */
static const struct dpdk_qos_ops egress_policer_ops;

/*
 * Array of dpdk_qos_ops, contains pointer to all supported QoS
 * operations.
 */
static const struct dpdk_qos_ops *const qos_confs[] = {
    &egress_policer_ops,
    NULL
};

static struct ovs_mutex dpdk_mutex = OVS_MUTEX_INITIALIZER;
/* Contains all 'struct dpdk_dev's. */
static struct ovs_list dpdk_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_list);

static struct ovs_mutex dpdk_mp_mutex OVS_ACQ_AFTER(dpdk_mutex)
    = OVS_MUTEX_INITIALIZER;

static struct ovs_list dpdk_mp_list OVS_GUARDED_BY(dpdk_mp_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_mp_list);
struct dpdk_mp {
    struct rte_mempool *mp;
    int mtu;
    int socket_id;
    int refcount;
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mp_mutex);
};
/* There should be one 'struct dpdk_tx_queue' created for
 * each cpu core. */
struct dpdk_tx_queue {
    rte_spinlock_t tx_lock;        /* Protects the members and the NIC queue
                                    * from concurrent access.  It is used only
                                    * if the queue is shared among different
                                    * pmd threads (see 'concurrent_txq'). */
    int map;                       /* Mapping of configured vhost-user queue
                                    * to the queue enabled by the guest. */
};
/* dpdk has no way to remove dpdk ring ethernet devices,
 * so we have to keep them around once they've been created. */
static struct ovs_list dpdk_ring_list OVS_GUARDED_BY(dpdk_mutex)
    = OVS_LIST_INITIALIZER(&dpdk_ring_list);
struct dpdk_ring {
    /* For the client rings */
    struct rte_ring *cring_tx;
    struct rte_ring *cring_rx;
    unsigned int user_port_id; /* User given port no, parsed from port name */
    int eth_port_id;           /* ethernet device port id */
    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
struct ingress_policer {
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm in_policer;
    rte_spinlock_t policer_lock;
};
struct netdev_dpdk {
    struct netdev up;
    int port_id;
    int max_packet_len;
    enum dpdk_dev_type type;

    struct dpdk_tx_queue *tx_q;

    struct ovs_mutex mutex OVS_ACQ_AFTER(dpdk_mutex);

    struct dpdk_mp *dpdk_mp;
    int mtu;
    int socket_id;
    int buf_size;
    struct netdev_stats stats;

    rte_spinlock_t stats_lock;

    struct eth_addr hwaddr;
    enum netdev_flags flags;

    struct rte_eth_link link;
    int link_reset_cnt;

    /* virtio identifier for vhost devices */
    ovsrcu_index vid;

    /* True if vHost device is 'up' and has been reconfigured at least once */
    bool vhost_reconfigured;

    /* Identifier used to distinguish vhost devices from each other. */
    char vhost_id[PATH_MAX];

    struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);

    /* QoS configuration and lock for the device */
    OVSRCU_TYPE(struct qos_conf *) qos_conf;

    /* The following properties cannot be changed when a device is running,
     * so we remember the request and update them next time
     * netdev_dpdk*_reconfigure() is called */
    int requested_mtu;
    int requested_n_txq;
    int requested_n_rxq;
    int requested_rxq_size;
    int requested_txq_size;

    /* Number of rx/tx descriptors for physical devices */
    int rxq_size;
    int txq_size;

    /* Socket ID detected when vHost device is brought up */
    int requested_socket_id;

    /* Denotes whether vHost port is client/server mode */
    uint64_t vhost_driver_flags;

    /* Ingress Policer */
    OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
    uint32_t policer_rate;
    uint32_t policer_burst;

    /* DPDK-ETH Flow control */
    struct rte_eth_fc_conf fc_conf;
};

struct netdev_rxq_dpdk {
    struct netdev_rxq up;
    int port_id;
};
static int netdev_dpdk_class_init(void);
static int netdev_dpdk_vhost_class_init(void);

int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);

static bool
is_dpdk_class(const struct netdev_class *class)
{
    return class->init == netdev_dpdk_class_init
           || class->init == netdev_dpdk_vhost_class_init;
}
/* DPDK NIC drivers allocate RX buffers at a particular granularity, typically
 * aligned at 1k or less. If a declared mbuf size is not a multiple of this
 * value, insufficient buffers are allocated to accommodate the packet in its
 * entirety. Furthermore, certain drivers need to ensure that there is also
 * sufficient space in the Rx buffer to accommodate two VLAN tags (for QinQ
 * frames). If the RX buffer is too small, then the driver enables scatter RX
 * behaviour, which reduces performance. To prevent this, use a buffer size
 * that is closest to 'mtu', but which satisfies the aforementioned criteria.
 */
static uint32_t
dpdk_buf_size(int mtu)
{
    return ROUND_UP((MTU_TO_MAX_FRAME_LEN(mtu) + RTE_PKTMBUF_HEADROOM),
                     NETDEV_DPDK_MBUF_ALIGN);
}
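/* Example (illustrative): with DPDK's default RTE_PKTMBUF_HEADROOM of 128
 * bytes, dpdk_buf_size(1500) = ROUND_UP(1526 + 128, 1024) = 2048, a whole
 * multiple of the 1 KB allocation granularity described above. */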
/* Allocates an area of 'sz' bytes from DPDK.  The memory is zero'ed.
 *
 * Unlike xmalloc(), this function can return NULL on failure. */
static void *
dpdk_rte_mzalloc(size_t sz)
{
    return rte_zmalloc(OVS_VPORT_DPDK, sz, OVS_CACHE_LINE_SIZE);
}
void
free_dpdk_buf(struct dp_packet *p)
{
    struct rte_mbuf *pkt = (struct rte_mbuf *) p;

    rte_pktmbuf_free(pkt);
}
static void
ovs_rte_pktmbuf_init(struct rte_mempool *mp,
                     void *opaque_arg OVS_UNUSED,
                     void *_p,
                     unsigned i OVS_UNUSED)
{
    struct rte_mbuf *pkt = _p;

    rte_pktmbuf_init(mp, opaque_arg, _p, i);

    dp_packet_init_dpdk((struct dp_packet *) pkt, pkt->buf_len);
}
static struct dpdk_mp *
dpdk_mp_create(int socket_id, int mtu)
{
    struct rte_pktmbuf_pool_private mbp_priv;
    struct dpdk_mp *dmp;
    unsigned mp_size;
    char *mp_name;

    dmp = dpdk_rte_mzalloc(sizeof *dmp);
    if (!dmp) {
        return NULL;
    }
    dmp->socket_id = socket_id;
    dmp->mtu = mtu;
    dmp->refcount = 1;
    mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
    mbp_priv.mbuf_priv_size = sizeof(struct dp_packet)
                              - sizeof(struct rte_mbuf);
    /* XXX: this is a really rough method of provisioning memory.
     * It's impossible to determine what the exact memory requirements are
     * when the number of ports and rxqs that utilize a particular mempool can
     * change dynamically at runtime. For now, use this rough heuristic.
     */
    if (mtu >= ETHER_MTU) {
        mp_size = MAX_NB_MBUF;
    } else {
        mp_size = MIN_NB_MBUF;
    }

    do {
        mp_name = xasprintf("ovs_mp_%d_%d_%u", dmp->mtu, dmp->socket_id,
                            mp_size);

        dmp->mp = rte_mempool_create(mp_name, mp_size, MBUF_SIZE(mtu),
                                     MP_CACHE_SZ,
                                     sizeof(struct rte_pktmbuf_pool_private),
                                     rte_pktmbuf_pool_init, &mbp_priv,
                                     ovs_rte_pktmbuf_init, NULL,
                                     socket_id, 0);
        if (dmp->mp) {
            VLOG_DBG("Allocated \"%s\" mempool with %u mbufs",
                     mp_name, mp_size);
        }
        free(mp_name);
        if (dmp->mp) {
            return dmp;
        }
    } while (rte_errno == ENOMEM && (mp_size /= 2) >= MIN_NB_MBUF);

    rte_free(dmp);
    return NULL;
}
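/* Illustrative numbers: with MAX_NB_MBUF = 4096 * 64 = 262144 and
 * MIN_NB_MBUF = 4096 * 4 = 16384, repeated ENOMEM failures retry the
 * allocation above with 131072, 65536, 32768 and finally 16384 mbufs
 * before giving up. */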
static struct dpdk_mp *
dpdk_mp_get(int socket_id, int mtu)
{
    struct dpdk_mp *dmp;

    ovs_mutex_lock(&dpdk_mp_mutex);
    LIST_FOR_EACH (dmp, list_node, &dpdk_mp_list) {
        if (dmp->socket_id == socket_id && dmp->mtu == mtu) {
            dmp->refcount++;
            goto out;
        }
    }

    dmp = dpdk_mp_create(socket_id, mtu);
    ovs_list_push_back(&dpdk_mp_list, &dmp->list_node);

out:
    ovs_mutex_unlock(&dpdk_mp_mutex);

    return dmp;
}
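/* Note (editorial): mempools are shared and reference-counted.  Two ports
 * with the same socket_id and MTU reuse one pool; dpdk_mp_put() below frees
 * the pool only when the last user releases its reference. */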
static void
dpdk_mp_put(struct dpdk_mp *dmp)
{
    if (!dmp) {
        return;
    }

    ovs_mutex_lock(&dpdk_mp_mutex);
    ovs_assert(dmp->refcount);

    if (!--dmp->refcount) {
        ovs_list_remove(&dmp->list_node);
        rte_mempool_free(dmp->mp);
        rte_free(dmp);
    }
    ovs_mutex_unlock(&dpdk_mp_mutex);
}
/* Tries to allocate new mempool on requested_socket_id with
 * mbuf size corresponding to requested_mtu.
 * On success new configuration will be applied.
 * On error, device will be left unchanged. */
static int
netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
    struct dpdk_mp *mp;

    mp = dpdk_mp_get(dev->requested_socket_id, FRAME_LEN_TO_MTU(buf_size));
    if (!mp) {
        VLOG_ERR("Insufficient memory to create memory pool for netdev "
                 "%s, with MTU %d on socket %d\n",
                 dev->up.name, dev->requested_mtu, dev->requested_socket_id);
        return ENOMEM;
    } else {
        dpdk_mp_put(dev->dpdk_mp);
        dev->dpdk_mp = mp;
        dev->mtu = dev->requested_mtu;
        dev->socket_id = dev->requested_socket_id;
        dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    }

    return 0;
}
static void
check_link_status(struct netdev_dpdk *dev)
{
    struct rte_eth_link link;

    rte_eth_link_get_nowait(dev->port_id, &link);

    if (dev->link.link_status != link.link_status) {
        netdev_change_seq_changed(&dev->up);

        dev->link_reset_cnt++;
        dev->link = link;
        if (dev->link.link_status) {
            VLOG_DBG_RL(&rl, "Port %d Link Up - speed %u Mbps - %s",
                        dev->port_id, (unsigned) dev->link.link_speed,
                        (dev->link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
                        ("full-duplex") : ("half-duplex"));
        } else {
            VLOG_DBG_RL(&rl, "Port %d Link Down", dev->port_id);
        }
    }
}
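/* Watchdog thread: re-checks the link status of every DPDK ethernet device
 * once per DPDK_PORT_WATCHDOG_INTERVAL (5) seconds. */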
static void *
dpdk_watchdog(void *dummy OVS_UNUSED)
{
    struct netdev_dpdk *dev;

    pthread_detach(pthread_self());

    for (;;) {
        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (dev, list_node, &dpdk_list) {
            ovs_mutex_lock(&dev->mutex);
            if (dev->type == DPDK_DEV_ETH) {
                check_link_status(dev);
            }
            ovs_mutex_unlock(&dev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
        xsleep(DPDK_PORT_WATCHDOG_INTERVAL);
    }

    return NULL;
}
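/* Configures 'n_rxq' rx queues and 'n_txq' tx queues on 'dev', retrying with
 * progressively fewer queues when the device rejects one (see the comment
 * inside the function body for why that can happen). */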
static int
dpdk_eth_dev_queue_setup(struct netdev_dpdk *dev, int n_rxq, int n_txq)
    OVS_REQUIRES(dev->mutex)
{
    int diag = 0;
    int i;
    struct rte_eth_conf conf = port_conf;

    if (dev->mtu > ETHER_MTU) {
        conf.rxmode.jumbo_frame = 1;
        conf.rxmode.max_rx_pkt_len = dev->max_packet_len;
    } else {
        conf.rxmode.jumbo_frame = 0;
        conf.rxmode.max_rx_pkt_len = 0;
    }
    /* A device may report more queues than it makes available (this has
     * been observed for Intel xl710, which reserves some of them for
     * SRIOV): rte_eth_*_queue_setup will fail if a queue is not
     * available.  When this happens we can retry the configuration
     * and request fewer queues. */
    while (n_rxq && n_txq) {
        if (diag) {
            VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
        }

        diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
        if (diag) {
            VLOG_WARN("Interface %s eth_dev setup error %s\n",
                      dev->up.name, rte_strerror(-diag));
            break;
        }

        for (i = 0; i < n_txq; i++) {
            diag = rte_eth_tx_queue_setup(dev->port_id, i, dev->txq_size,
                                          dev->socket_id, NULL);
            if (diag) {
                VLOG_INFO("Interface %s txq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_txq) {
            /* Retry with fewer tx queues */
            n_txq = i;
            continue;
        }

        for (i = 0; i < n_rxq; i++) {
            diag = rte_eth_rx_queue_setup(dev->port_id, i, dev->rxq_size,
                                          dev->socket_id, NULL,
                                          dev->dpdk_mp->mp);
            if (diag) {
                VLOG_INFO("Interface %s rxq(%d) setup error: %s",
                          dev->up.name, i, rte_strerror(-diag));
                break;
            }
        }

        if (i != n_rxq) {
            /* Retry with fewer rx queues */
            n_rxq = i;
            continue;
        }

        dev->up.n_rxq = n_rxq;
        dev->up.n_txq = n_txq;

        return 0;
    }

    return diag ? -diag : EINVAL;
}
static void
dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
{
    if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
        VLOG_WARN("Failed to enable flow control on device %d", dev->port_id);
    }
}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    struct rte_pktmbuf_pool_private *mbp_priv;
    struct rte_eth_dev_info info;
    struct ether_addr eth_addr;
    int diag;
    int n_rxq, n_txq;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    rte_eth_dev_info_get(dev->port_id, &info);

    n_rxq = MIN(info.max_rx_queues, dev->up.n_rxq);
    n_txq = MIN(info.max_tx_queues, dev->up.n_txq);

    diag = dpdk_eth_dev_queue_setup(dev, n_rxq, n_txq);
    if (diag) {
        VLOG_ERR("Interface %s(rxq:%d txq:%d) configure error: %s",
                 dev->up.name, n_rxq, n_txq, rte_strerror(-diag));
        return -diag;
    }

    diag = rte_eth_dev_start(dev->port_id);
    if (diag) {
        VLOG_ERR("Interface %s start error: %s", dev->up.name,
                 rte_strerror(-diag));
        return -diag;
    }

    rte_eth_promiscuous_enable(dev->port_id);
    rte_eth_allmulticast_enable(dev->port_id);

    memset(&eth_addr, 0x0, sizeof(eth_addr));
    rte_eth_macaddr_get(dev->port_id, &eth_addr);
    VLOG_INFO_RL(&rl, "Port %d: "ETH_ADDR_FMT"",
                 dev->port_id, ETH_ADDR_BYTES_ARGS(eth_addr.addr_bytes));

    memcpy(dev->hwaddr.ea, eth_addr.addr_bytes, ETH_ADDR_LEN);
    rte_eth_link_get_nowait(dev->port_id, &dev->link);

    mbp_priv = rte_mempool_get_priv(dev->dpdk_mp->mp);
    dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

    dev->flags = NETDEV_UP | NETDEV_PROMISC;

    /* Get the Flow control configuration for DPDK-ETH */
    diag = rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
    if (diag) {
        VLOG_DBG("cannot get flow control parameters on port=%d, err=%d",
                 dev->port_id, diag);
    }

    return 0;
}
static struct netdev_dpdk *
netdev_dpdk_cast(const struct netdev *netdev)
{
    return CONTAINER_OF(netdev, struct netdev_dpdk, up);
}
static struct netdev *
netdev_dpdk_alloc(void)
{
    struct netdev_dpdk *dev;

    dev = dpdk_rte_mzalloc(sizeof *dev);
    if (dev) {
        return &dev->up;
    }

    return NULL;
}
static struct dpdk_tx_queue *
netdev_dpdk_alloc_txq(unsigned int n_txqs)
{
    struct dpdk_tx_queue *txqs;
    unsigned i;

    txqs = dpdk_rte_mzalloc(n_txqs * sizeof *txqs);
    if (txqs) {
        for (i = 0; i < n_txqs; i++) {
            /* Initialize map for vhost devices. */
            txqs[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
            rte_spinlock_init(&txqs[i].tx_lock);
        }
    }

    return txqs;
}
static int
netdev_dpdk_init(struct netdev *netdev, unsigned int port_no,
                 enum dpdk_dev_type type)
    OVS_REQUIRES(dpdk_mutex)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int sid;
    int err = 0;

    ovs_mutex_init(&dev->mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_init(&dev->stats_lock);

    /* If the 'sid' is negative, it means that the kernel fails
     * to obtain the pci numa info.  In that situation, always
     * use 'SOCKET0'. */
    if (type == DPDK_DEV_ETH) {
        sid = rte_eth_dev_socket_id(port_no);
    } else {
        sid = rte_lcore_to_socket_id(rte_get_master_lcore());
    }

    dev->socket_id = sid < 0 ? SOCKET0 : sid;
    dev->requested_socket_id = dev->socket_id;
    dev->port_id = port_no;
    dev->type = type;
    dev->requested_mtu = dev->mtu = ETHER_MTU;
    dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
    ovsrcu_index_init(&dev->vid, -1);
    dev->vhost_reconfigured = false;

    err = netdev_dpdk_mempool_configure(dev);
    if (err) {
        goto unlock;
    }

    ovsrcu_init(&dev->qos_conf, NULL);

    ovsrcu_init(&dev->ingress_policer, NULL);
    dev->policer_rate = 0;
    dev->policer_burst = 0;

    netdev->n_rxq = NR_QUEUE;
    netdev->n_txq = NR_QUEUE;
    dev->requested_n_rxq = netdev->n_rxq;
    dev->requested_n_txq = netdev->n_txq;
    dev->rxq_size = NIC_PORT_DEFAULT_RXQ_SIZE;
    dev->txq_size = NIC_PORT_DEFAULT_TXQ_SIZE;
    dev->requested_rxq_size = dev->rxq_size;
    dev->requested_txq_size = dev->txq_size;

    /* Initialize the flow control to NULL */
    memset(&dev->fc_conf, 0, sizeof dev->fc_conf);
    if (type == DPDK_DEV_ETH) {
        err = dpdk_eth_dev_init(dev);
        if (err) {
            goto unlock;
        }
        dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq);
    } else {
        dev->tx_q = netdev_dpdk_alloc_txq(OVS_VHOST_MAX_QUEUE_NUM);
        /* Enable DPDK_DEV_VHOST device and set promiscuous mode flag. */
        dev->flags = NETDEV_UP | NETDEV_PROMISC;
    }

    if (!dev->tx_q) {
        err = ENOMEM;
        goto unlock;
    }

    ovs_list_push_back(&dpdk_list, &dev->list_node);

unlock:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
/* dev_name must be the prefix followed by a positive decimal number.
 * (no leading + or - signs are allowed) */
static int
dpdk_dev_parse_name(const char dev_name[], const char prefix[],
                    unsigned int *port_no)
{
    const char *cport;

    if (strncmp(dev_name, prefix, strlen(prefix))) {
        return ENODEV;
    }

    cport = dev_name + strlen(prefix);

    if (str_to_uint(cport, 10, port_no)) {
        return 0;
    } else {
        return ENODEV;
    }
}
static int
netdev_dpdk_vhost_construct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *name = netdev->name;
    int err;

    /* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
     * the file system. '/' or '\' would traverse directories, so they're not
     * acceptable in 'name'. */
    if (strchr(name, '/') || strchr(name, '\\')) {
        VLOG_ERR("\"%s\" is not a valid name for a vhost-user port. "
                 "A valid name must not include '/' or '\\'",
                 name);
        return EINVAL;
    }

    ovs_mutex_lock(&dpdk_mutex);
    /* Take the name of the vhost-user port and append it to the location
     * where the socket is to be created, then register the socket. */
    snprintf(dev->vhost_id, sizeof dev->vhost_id, "%s/%s",
             dpdk_get_vhost_sock_dir(), name);

    dev->vhost_driver_flags &= ~RTE_VHOST_USER_CLIENT;
    err = rte_vhost_driver_register(dev->vhost_id, dev->vhost_driver_flags);
    if (err) {
        VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
                 dev->vhost_id);
    } else {
        fatal_signal_add_file_to_unlink(dev->vhost_id);
        VLOG_INFO("Socket %s created for vhost-user port %s\n",
                  dev->vhost_id, name);
    }
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);

    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_vhost_client_construct(struct netdev *netdev)
{
    int err;

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static int
netdev_dpdk_construct(struct netdev *netdev)
{
    unsigned int port_no;
    int err;

    /* Names always start with "dpdk" */
    err = dpdk_dev_parse_name(netdev->name, "dpdk", &port_no);
    if (err) {
        return err;
    }

    ovs_mutex_lock(&dpdk_mutex);
    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
static void
netdev_dpdk_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    rte_eth_dev_stop(dev->port_id);
    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);
}
/* rte_vhost_driver_unregister() can call back destroy_device(), which will
 * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'.  To avoid a
 * deadlock, none of the mutexes must be held while calling this function. */
static int
dpdk_vhost_driver_unregister(struct netdev_dpdk *dev OVS_UNUSED,
                             char *vhost_id)
    OVS_EXCLUDED(dpdk_mutex)
    OVS_EXCLUDED(dev->mutex)
{
    return rte_vhost_driver_unregister(vhost_id);
}
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    char *vhost_id;

    ovs_mutex_lock(&dpdk_mutex);
    ovs_mutex_lock(&dev->mutex);

    /* Guest becomes an orphan if still attached. */
    if (netdev_dpdk_get_vid(dev) >= 0
        && !(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        VLOG_ERR("Removing port '%s' while vhost device still attached.",
                 netdev->name);
        VLOG_ERR("To restore connectivity after re-adding of port, VM on "
                 "socket '%s' must be restarted.", dev->vhost_id);
    }

    free(ovsrcu_get_protected(struct ingress_policer *,
                              &dev->ingress_policer));

    rte_free(dev->tx_q);
    ovs_list_remove(&dev->list_node);
    dpdk_mp_put(dev->dpdk_mp);

    vhost_id = xstrdup(dev->vhost_id);

    ovs_mutex_unlock(&dev->mutex);
    ovs_mutex_unlock(&dpdk_mutex);

    if (dpdk_vhost_driver_unregister(dev, vhost_id)) {
        VLOG_ERR("%s: Unable to unregister vhost driver for socket '%s'.\n",
                 netdev->name, vhost_id);
    } else if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        /* OVS server mode - remove this socket from list for deletion */
        fatal_signal_remove_file_to_unlink(vhost_id);
    }
    free(vhost_id);
}
static void
netdev_dpdk_dealloc(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    rte_free(dev);
}
static int
netdev_dpdk_get_config(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
    smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
    smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
    smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
    smap_add_format(args, "requested_rxq_descriptors", "%d",
                    dev->requested_rxq_size);
    smap_add_format(args, "configured_rxq_descriptors", "%d", dev->rxq_size);
    smap_add_format(args, "requested_txq_descriptors", "%d",
                    dev->requested_txq_size);
    smap_add_format(args, "configured_txq_descriptors", "%d", dev->txq_size);
    smap_add_format(args, "mtu", "%d", dev->mtu);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args)
    OVS_REQUIRES(dev->mutex)
{
    int new_n_rxq;

    new_n_rxq = MAX(smap_get_int(args, "n_rxq", dev->requested_n_rxq), 1);
    if (new_n_rxq != dev->requested_n_rxq) {
        dev->requested_n_rxq = new_n_rxq;
        netdev_request_reconfigure(&dev->up);
    }
}
static void
dpdk_process_queue_size(struct netdev *netdev, const struct smap *args,
                        const char *flag, int default_size, int *new_size)
{
    int queue_size = smap_get_int(args, flag, default_size);

    if (queue_size <= 0 || queue_size > NIC_PORT_MAX_Q_SIZE
            || !is_pow2(queue_size)) {
        queue_size = default_size;
    }

    if (queue_size != *new_size) {
        *new_size = queue_size;
        netdev_request_reconfigure(netdev);
    }
}
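/* For example, n_rxq_desc=1024 is accepted as-is, while 1000 (not a power of
 * two) or 8192 (greater than NIC_PORT_MAX_Q_SIZE) silently falls back to
 * 'default_size'. */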
static int
netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    bool rx_fc_en, tx_fc_en, autoneg;
    enum rte_eth_fc_mode fc_mode;
    static const enum rte_eth_fc_mode fc_mode_set[2][2] = {
        {RTE_FC_NONE,     RTE_FC_TX_PAUSE},
        {RTE_FC_RX_PAUSE, RTE_FC_FULL    }
    };
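    /* The table above is indexed as fc_mode_set[tx_fc_en][rx_fc_en]: e.g.
     * tx-flow-ctrl=true with rx-flow-ctrl=false selects RTE_FC_TX_PAUSE,
     * and enabling both selects RTE_FC_FULL. */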
    ovs_mutex_lock(&dev->mutex);

    dpdk_set_rxq_config(dev, args);

    dpdk_process_queue_size(netdev, args, "n_rxq_desc",
                            NIC_PORT_DEFAULT_RXQ_SIZE,
                            &dev->requested_rxq_size);
    dpdk_process_queue_size(netdev, args, "n_txq_desc",
                            NIC_PORT_DEFAULT_TXQ_SIZE,
                            &dev->requested_txq_size);

    rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
    tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
    autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);

    fc_mode = fc_mode_set[tx_fc_en][rx_fc_en];
    if (dev->fc_conf.mode != fc_mode || autoneg != dev->fc_conf.autoneg) {
        dev->fc_conf.mode = fc_mode;
        dev->fc_conf.autoneg = autoneg;
        dpdk_eth_flow_ctrl_setup(dev);
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_set_rxq_config(dev, args);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_client_set_config(struct netdev *netdev,
                                    const struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const char *path;

    ovs_mutex_lock(&dev->mutex);
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)) {
        path = smap_get(args, "vhost-server-path");
        if (path && strcmp(path, dev->vhost_id)) {
            strcpy(dev->vhost_id, path);
            netdev_request_reconfigure(netdev);
        }
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    return dev->socket_id;
}
/* Sets the number of tx queues for the dpdk interface. */
static int
netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (dev->requested_n_txq == n_txq) {
        goto out;
    }

    dev->requested_n_txq = n_txq;
    netdev_request_reconfigure(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return 0;
}
static struct netdev_rxq *
netdev_dpdk_rxq_alloc(void)
{
    struct netdev_rxq_dpdk *rx = dpdk_rte_mzalloc(sizeof *rx);

    if (rx) {
        return &rx->up;
    }

    return NULL;
}
static struct netdev_rxq_dpdk *
netdev_rxq_dpdk_cast(const struct netdev_rxq *rxq)
{
    return CONTAINER_OF(rxq, struct netdev_rxq_dpdk, up);
}
static int
netdev_dpdk_rxq_construct(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);

    ovs_mutex_lock(&dev->mutex);
    rx->port_id = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_rxq_destruct(struct netdev_rxq *rxq OVS_UNUSED)
{
}

static void
netdev_dpdk_rxq_dealloc(struct netdev_rxq *rxq)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);

    rte_free(rx);
}
/* Tries to transmit 'pkts' to txq 'qid' of device 'dev'.  Takes ownership of
 * 'pkts', even in case of failure.
 *
 * Returns the number of packets that weren't transmitted. */
static inline int
netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
                         struct rte_mbuf **pkts, int cnt)
{
    uint32_t nb_tx = 0;

    while (nb_tx != cnt) {
        uint32_t ret;

        ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, cnt - nb_tx);
        if (!ret) {
            break;
        }

        nb_tx += ret;
    }

    if (OVS_UNLIKELY(nb_tx != cnt)) {
        /* Free buffers, which we couldn't transmit, one at a time (each
         * packet could come from a different mempool) */
        int i;

        for (i = nb_tx; i < cnt; i++) {
            rte_pktmbuf_free(pkts[i]);
        }
    }

    return cnt - nb_tx;
}
static inline bool
netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
                               struct rte_mbuf *pkt, uint64_t time)
{
    uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

    return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
                                             e_RTE_METER_GREEN;
}
static int
netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
                        struct rte_mbuf **pkts, int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt = NULL;
    uint64_t current_time = rte_rdtsc();

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        /* Handle current packet */
        if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
            if (cnt != i) {
                pkts[cnt] = pkt;
            }
            cnt++;
        } else {
            rte_pktmbuf_free(pkt);
        }
    }

    return cnt;
}
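/* Note (editorial): packets that pass the meter (colored green) are compacted
 * to the front of 'pkts' and counted; the rest are freed, matching the
 * compaction convention that 'qos_run' implementations use. */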
static int
ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
                    int pkt_cnt)
{
    int cnt = 0;

    rte_spinlock_lock(&policer->policer_lock);
    cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts, pkt_cnt);
    rte_spinlock_unlock(&policer->policer_lock);

    return cnt;
}
static bool
is_vhost_running(struct netdev_dpdk *dev)
{
    return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
}
static inline void
netdev_dpdk_vhost_update_rx_size_counters(struct netdev_stats *stats,
                                          unsigned int packet_size)
{
    /* Hard-coded search for the size bucket. */
    if (packet_size < 256) {
        if (packet_size >= 128) {
            stats->rx_128_to_255_packets++;
        } else if (packet_size <= 64) {
            stats->rx_1_to_64_packets++;
        } else {
            stats->rx_65_to_127_packets++;
        }
    } else {
        if (packet_size >= 1523) {
            stats->rx_1523_to_max_packets++;
        } else if (packet_size >= 1024) {
            stats->rx_1024_to_1522_packets++;
        } else if (packet_size < 512) {
            stats->rx_256_to_511_packets++;
        } else {
            stats->rx_512_to_1023_packets++;
        }
    }
}
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets, int count,
                                     int dropped)
{
    int i;
    unsigned int packet_size;
    struct dp_packet *packet;

    stats->rx_packets += count;
    stats->rx_dropped += dropped;
    for (i = 0; i < count; i++) {
        packet = packets[i];
        packet_size = dp_packet_size(packet);

        if (OVS_UNLIKELY(packet_size < ETH_HEADER_LEN)) {
            /* This only protects the following multicast counting from
             * too short packets, but it does not stop the packet from
             * further processing. */
            stats->rx_errors++;
            stats->rx_length_errors++;
            continue;
        }

        netdev_dpdk_vhost_update_rx_size_counters(stats, packet_size);

        struct eth_header *eh = (struct eth_header *) dp_packet_data(packet);
        if (OVS_UNLIKELY(eth_addr_is_multicast(eh->eth_dst))) {
            stats->multicast++;
        }

        stats->rx_bytes += packet_size;
    }
}
/*
 * The receive path for the vhost port is the TX path out from guest.
 */
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
                           struct dp_packet_batch *batch)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    int qid = rxq->queue_id;
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    uint16_t nb_rx = 0;
    uint16_t dropped = 0;

    if (OVS_UNLIKELY(!is_vhost_running(dev)
                     || !(dev->flags & NETDEV_UP))) {
        return EAGAIN;
    }

    nb_rx = rte_vhost_dequeue_burst(netdev_dpdk_get_vid(dev),
                                    qid * VIRTIO_QNUM + VIRTIO_TXQ,
                                    dev->dpdk_mp->mp,
                                    (struct rte_mbuf **) batch->packets,
                                    NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
                                         nb_rx, dropped);
    rte_spinlock_unlock(&dev->stats_lock);

    batch->count = (int) nb_rx;
    return 0;
}
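/* In the function above, 'qid * VIRTIO_QNUM + VIRTIO_TXQ' selects the
 * guest's TX virtqueue: as the function comment says, what the guest
 * transmits is what OVS receives. */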
static int
netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch)
{
    struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
    struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
    struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
    int nb_rx;
    int dropped = 0;

    nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
                             (struct rte_mbuf **) batch->packets,
                             NETDEV_MAX_BURST);
    if (!nb_rx) {
        return EAGAIN;
    }

    if (policer) {
        dropped = nb_rx;
        nb_rx = ingress_policer_run(policer,
                                    (struct rte_mbuf **) batch->packets,
                                    nb_rx);
        dropped -= nb_rx;
    }

    /* Update stats to reflect dropped packets */
    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.rx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }

    batch->count = nb_rx;

    return 0;
}
static inline int
netdev_dpdk_qos_run(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                    int cnt)
{
    struct qos_conf *qos_conf = ovsrcu_get(struct qos_conf *, &dev->qos_conf);

    if (qos_conf) {
        rte_spinlock_lock(&qos_conf->lock);
        cnt = qos_conf->ops->qos_run(qos_conf, pkts, cnt);
        rte_spinlock_unlock(&qos_conf->lock);
    }

    return cnt;
}
static int
netdev_dpdk_filter_packet_len(struct netdev_dpdk *dev, struct rte_mbuf **pkts,
                              int pkt_cnt)
{
    int i = 0;
    int cnt = 0;
    struct rte_mbuf *pkt;

    for (i = 0; i < pkt_cnt; i++) {
        pkt = pkts[i];
        if (OVS_UNLIKELY(pkt->pkt_len > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "%s: Too big size %" PRIu32 " max_packet_len %d",
                         dev->up.name, pkt->pkt_len, dev->max_packet_len);
            rte_pktmbuf_free(pkt);
            continue;
        }

        if (OVS_UNLIKELY(i != cnt)) {
            pkts[cnt] = pkt;
        }
        cnt++;
    }

    return cnt;
}
static inline void
netdev_dpdk_vhost_update_tx_counters(struct netdev_stats *stats,
                                     struct dp_packet **packets,
                                     int attempted,
                                     int dropped)
{
    int i;
    int sent = attempted - dropped;

    stats->tx_packets += sent;
    stats->tx_dropped += dropped;

    for (i = 0; i < sent; i++) {
        stats->tx_bytes += dp_packet_size(packets[i]);
    }
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                         struct dp_packet **pkts, int cnt)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
    unsigned int total_pkts = cnt;
    unsigned int dropped = 0;
    int i, retries = 0;

    qid = dev->tx_q[qid % netdev->n_txq].map;

    if (OVS_UNLIKELY(!is_vhost_running(dev) || qid < 0
                     || !(dev->flags & NETDEV_UP))) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += cnt;
        rte_spinlock_unlock(&dev->stats_lock);
        goto out;
    }

    rte_spinlock_lock(&dev->tx_q[qid].tx_lock);

    cnt = netdev_dpdk_filter_packet_len(dev, cur_pkts, cnt);
    /* Check whether QoS has been configured for the netdev. */
    cnt = netdev_dpdk_qos_run(dev, cur_pkts, cnt);
    dropped = total_pkts - cnt;

    do {
        int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
        unsigned int tx_pkts;

        tx_pkts = rte_vhost_enqueue_burst(netdev_dpdk_get_vid(dev),
                                          vhost_qid, cur_pkts, cnt);
        if (OVS_LIKELY(tx_pkts)) {
            /* Packets have been sent.*/
            cnt -= tx_pkts;
            /* Prepare for possible retry.*/
            cur_pkts = &cur_pkts[tx_pkts];
        } else {
            /* No packets sent - do not retry.*/
            break;
        }
    } while (cnt && (retries++ <= VHOST_ENQ_RETRY_NUM));

    rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);

    rte_spinlock_lock(&dev->stats_lock);
    netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
                                         cnt + dropped);
    rte_spinlock_unlock(&dev->stats_lock);

out:
    for (i = 0; i < total_pkts - dropped; i++) {
        dp_packet_delete(pkts[i]);
    }
}
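/* Note on the retry loop above: rte_vhost_enqueue_burst() may accept only
 * part of a burst when the guest's virtqueue is nearly full, so the send is
 * retried on the remainder up to VHOST_ENQ_RETRY_NUM (8) times, giving up
 * early if an attempt makes no progress at all. */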
/* Tx function. Transmit packets indefinitely */
static void
dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
    const size_t PKT_ARRAY_SIZE = batch->count;
#else
    /* Sparse or MSVC doesn't like variable length array. */
    enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_mbuf *pkts[PKT_ARRAY_SIZE];
    int dropped = 0;
    int newcnt = 0;
    int i;

    dp_packet_batch_apply_cutlen(batch);

    for (i = 0; i < batch->count; i++) {
        int size = dp_packet_size(batch->packets[i]);

        if (OVS_UNLIKELY(size > dev->max_packet_len)) {
            VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
                         (int) size, dev->max_packet_len);
            dropped++;
            continue;
        }

        pkts[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);

        if (!pkts[newcnt]) {
            dropped += batch->count - i;
            break;
        }

        /* We have to do a copy for now */
        memcpy(rte_pktmbuf_mtod(pkts[newcnt], void *),
               dp_packet_data(batch->packets[i]), size);

        rte_pktmbuf_data_len(pkts[newcnt]) = size;
        rte_pktmbuf_pkt_len(pkts[newcnt]) = size;

        newcnt++;
    }

    if (dev->type == DPDK_DEV_VHOST) {
        __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) pkts,
                                 newcnt);
    } else {
        unsigned int qos_pkts = newcnt;

        /* Check if QoS has been configured for this netdev. */
        newcnt = netdev_dpdk_qos_run(dev, pkts, newcnt);

        dropped += qos_pkts - newcnt;
        dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, newcnt);
    }

    if (OVS_UNLIKELY(dropped)) {
        rte_spinlock_lock(&dev->stats_lock);
        dev->stats.tx_dropped += dropped;
        rte_spinlock_unlock(&dev->stats_lock);
    }
}
static int
netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
                       struct dp_packet_batch *batch,
                       bool may_steal, bool concurrent_txq OVS_UNUSED)
{

    if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        dp_packet_batch_apply_cutlen(batch);
        __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
    }
    return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
                   struct dp_packet_batch *batch, bool may_steal,
                   bool concurrent_txq)
{
    if (OVS_UNLIKELY(concurrent_txq)) {
        qid = qid % dev->up.n_txq;
        rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
    }

    if (OVS_UNLIKELY(!may_steal ||
                     batch->packets[0]->source != DPBUF_DPDK)) {
        struct netdev *netdev = &dev->up;

        dpdk_do_tx_copy(netdev, qid, batch);
        dp_packet_delete_batch(batch, may_steal);
    } else {
        int dropped;
        int cnt = batch->count;
        struct rte_mbuf **pkts = (struct rte_mbuf **) batch->packets;

        dp_packet_batch_apply_cutlen(batch);

        cnt = netdev_dpdk_filter_packet_len(dev, pkts, cnt);
        cnt = netdev_dpdk_qos_run(dev, pkts, cnt);
        dropped = batch->count - cnt;

        dropped += netdev_dpdk_eth_tx_burst(dev, qid, pkts, cnt);

        if (OVS_UNLIKELY(dropped)) {
            rte_spinlock_lock(&dev->stats_lock);
            dev->stats.tx_dropped += dropped;
            rte_spinlock_unlock(&dev->stats_lock);
        }
    }

    if (OVS_UNLIKELY(concurrent_txq)) {
        rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
    }
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
                     struct dp_packet_batch *batch, bool may_steal,
                     bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    if (!eth_addr_equals(dev->hwaddr, mac)) {
        dev->hwaddr = mac;
        netdev_change_seq_changed(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mac = dev->hwaddr;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_mtu(const struct netdev *netdev, int *mtup)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    *mtup = dev->mtu;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    if (MTU_TO_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
        || mtu < ETHER_MIN_MTU) {
        VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu);
        return EINVAL;
    }

    ovs_mutex_lock(&dev->mutex);
    if (dev->requested_mtu != mtu) {
        dev->requested_mtu = mtu;
        netdev_request_reconfigure(netdev);
    }
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier);
static int
netdev_dpdk_vhost_get_stats(const struct netdev *netdev,
                            struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    rte_spinlock_lock(&dev->stats_lock);
    /* Supported Stats */
    stats->rx_packets += dev->stats.rx_packets;
    stats->tx_packets += dev->stats.tx_packets;
    stats->rx_dropped = dev->stats.rx_dropped;
    stats->tx_dropped += dev->stats.tx_dropped;
    stats->multicast = dev->stats.multicast;
    stats->rx_bytes = dev->stats.rx_bytes;
    stats->tx_bytes = dev->stats.tx_bytes;
    stats->rx_errors = dev->stats.rx_errors;
    stats->rx_length_errors = dev->stats.rx_length_errors;

    stats->rx_1_to_64_packets = dev->stats.rx_1_to_64_packets;
    stats->rx_65_to_127_packets = dev->stats.rx_65_to_127_packets;
    stats->rx_128_to_255_packets = dev->stats.rx_128_to_255_packets;
    stats->rx_256_to_511_packets = dev->stats.rx_256_to_511_packets;
    stats->rx_512_to_1023_packets = dev->stats.rx_512_to_1023_packets;
    stats->rx_1024_to_1522_packets = dev->stats.rx_1024_to_1522_packets;
    stats->rx_1523_to_max_packets = dev->stats.rx_1523_to_max_packets;

    rte_spinlock_unlock(&dev->stats_lock);

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
                           const struct rte_eth_xstat *xstats,
                           const struct rte_eth_xstat_name *names,
                           const unsigned int size)
{
    for (unsigned int i = 0; i < size; i++) {
        if (strcmp(XSTAT_RX_64_PACKETS, names[i].name) == 0) {
            stats->rx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->rx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->rx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->rx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->rx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->rx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->rx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_64_PACKETS, names[i].name) == 0) {
            stats->tx_1_to_64_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, names[i].name) == 0) {
            stats->tx_65_to_127_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, names[i].name) == 0) {
            stats->tx_128_to_255_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, names[i].name) == 0) {
            stats->tx_256_to_511_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS, names[i].name) == 0) {
            stats->tx_512_to_1023_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS, names[i].name) == 0) {
            stats->tx_1024_to_1522_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
            stats->tx_1523_to_max_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, names[i].name) == 0) {
            stats->tx_multicast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->rx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, names[i].name) == 0) {
            stats->tx_broadcast_packets = xstats[i].value;
        } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, names[i].name) == 0) {
            stats->rx_undersized_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, names[i].name) == 0) {
            stats->rx_fragmented_errors = xstats[i].value;
        } else if (strcmp(XSTAT_RX_JABBER_ERRORS, names[i].name) == 0) {
            stats->rx_jabber_errors = xstats[i].value;
        }
    }
}
static int
netdev_dpdk_get_stats(const struct netdev *netdev, struct netdev_stats *stats)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_stats rte_stats;
    bool gg;

    netdev_dpdk_get_carrier(netdev, &gg);
    ovs_mutex_lock(&dev->mutex);

    struct rte_eth_xstat *rte_xstats = NULL;
    struct rte_eth_xstat_name *rte_xstats_names = NULL;
    int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;

    if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
        VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
        ovs_mutex_unlock(&dev->mutex);
        return EPROTO;
    }

    /* Get length of statistics */
    rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
    if (rte_xstats_len < 0) {
        VLOG_WARN("Cannot get XSTATS values for port: %i", dev->port_id);
        goto out;
    }

    /* Reserve memory for xstats names and values */
    rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
    rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);

    /* Retrieve xstats names */
    rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
                                                  rte_xstats_names,
                                                  rte_xstats_len);
    if (rte_xstats_new_len != rte_xstats_len) {
        VLOG_WARN("Cannot get XSTATS names for port: %i.", dev->port_id);
        goto out;
    }

    /* Retrieve xstats values */
    memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
    rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
                                        rte_xstats_len);
    if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
        netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
                                   rte_xstats_ret);
    } else {
        VLOG_WARN("Cannot get XSTATS values for port: %i.", dev->port_id);
    }

out:
    free(rte_xstats);
    free(rte_xstats_names);

    stats->rx_packets = rte_stats.ipackets;
    stats->tx_packets = rte_stats.opackets;
    stats->rx_bytes = rte_stats.ibytes;
    stats->tx_bytes = rte_stats.obytes;
    /* DPDK counts imissed as errors, but count them here as dropped instead */
    stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
    stats->tx_errors = rte_stats.oerrors;

    rte_spinlock_lock(&dev->stats_lock);
    stats->tx_dropped = dev->stats.tx_dropped;
    stats->rx_dropped = dev->stats.rx_dropped;
    rte_spinlock_unlock(&dev->stats_lock);

    /* These are the available DPDK counters for packets not received due to
     * local resource constraints in DPDK and NIC respectively. */
    stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
    stats->rx_missed_errors = rte_stats.imissed;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_features(const struct netdev *netdev,
                         enum netdev_features *current,
                         enum netdev_features *advertised OVS_UNUSED,
                         enum netdev_features *supported OVS_UNUSED,
                         enum netdev_features *peer OVS_UNUSED)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_link link;

    ovs_mutex_lock(&dev->mutex);
    link = dev->link;
    ovs_mutex_unlock(&dev->mutex);

    if (link.link_duplex == ETH_LINK_HALF_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_HD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_HD;
        }
    } else if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
        if (link.link_speed == ETH_SPEED_NUM_10M) {
            *current = NETDEV_F_10MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_100M) {
            *current = NETDEV_F_100MB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_1G) {
            *current = NETDEV_F_1GB_FD;
        }
        if (link.link_speed == ETH_SPEED_NUM_10G) {
            *current = NETDEV_F_10GB_FD;
        }
    }

    if (link.link_autoneg) {
        *current |= NETDEV_F_AUTONEG;
    }

    return 0;
}
static struct ingress_policer *
netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
{
    struct ingress_policer *policer = NULL;
    uint64_t rate_bytes;
    uint64_t burst_bytes;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    rte_spinlock_init(&policer->policer_lock);

    /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
    rate_bytes = rate * 1000/8;
    burst_bytes = burst * 1000/8;

    policer->app_srtcm_params.cir = rate_bytes;
    policer->app_srtcm_params.cbs = burst_bytes;
    policer->app_srtcm_params.ebs = 0;
    err = rte_meter_srtcm_config(&policer->in_policer,
                                 &policer->app_srtcm_params);
    if (err) {
        VLOG_ERR("Could not create rte meter for ingress policer");
        free(policer);
        return NULL;
    }

    return policer;
}
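/* Example (illustrative): a 10000 kbit/s rate converts to a CIR of
 * 10000 * 1000 / 8 = 1,250,000 bytes/s, and the default 8000 kbit burst
 * to a CBS of 1,000,000 bytes. */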
static int
netdev_dpdk_set_policing(struct netdev* netdev, uint32_t policer_rate,
                         uint32_t policer_burst)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct ingress_policer *policer;

    /* Force to 0 if no rate specified,
     * default to 8000 kbits if burst is 0,
     * else stick with user-specified value. */
    policer_burst = (!policer_rate ? 0
                     : !policer_burst ? 8000
                     : policer_burst);

    ovs_mutex_lock(&dev->mutex);

    policer = ovsrcu_get_protected(struct ingress_policer *,
                                   &dev->ingress_policer);

    if (dev->policer_rate == policer_rate &&
        dev->policer_burst == policer_burst) {
        /* Assume that settings haven't changed since we last set them. */
        ovs_mutex_unlock(&dev->mutex);
        return 0;
    }

    /* Destroy any existing ingress policer for the device if one exists */
    if (policer) {
        ovsrcu_postpone(free, policer);
    }

    if (policer_rate != 0) {
        policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
    } else {
        policer = NULL;
    }
    ovsrcu_set(&dev->ingress_policer, policer);
    dev->policer_rate = policer_rate;
    dev->policer_burst = policer_burst;
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int ifindex;

    ovs_mutex_lock(&dev->mutex);
    ifindex = dev->port_id;
    ovs_mutex_unlock(&dev->mutex);

    return ifindex;
}
static int
netdev_dpdk_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    check_link_status(dev);
    *carrier = dev->link.link_status;

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);

    if (is_vhost_running(dev)) {
        *carrier = 1;
    } else {
        *carrier = 0;
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static long long int
netdev_dpdk_get_carrier_resets(const struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    long long int carrier_resets;

    ovs_mutex_lock(&dev->mutex);
    carrier_resets = dev->link_reset_cnt;
    ovs_mutex_unlock(&dev->mutex);

    return carrier_resets;
}
static int
netdev_dpdk_set_miimon(struct netdev *netdev OVS_UNUSED,
                       long long int interval OVS_UNUSED)
{
    return 0;
}
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
                           enum netdev_flags off, enum netdev_flags on,
                           enum netdev_flags *old_flagsp)
    OVS_REQUIRES(dev->mutex)
{
    int err;

    if ((off | on) & ~(NETDEV_UP | NETDEV_PROMISC)) {
        return EINVAL;
    }

    *old_flagsp = dev->flags;
    dev->flags |= on;
    dev->flags &= ~off;

    if (dev->flags == *old_flagsp) {
        return 0;
    }

    if (dev->type == DPDK_DEV_ETH) {
        if (dev->flags & NETDEV_UP) {
            err = rte_eth_dev_start(dev->port_id);
            if (err) {
                return -err;
            }
        }

        if (dev->flags & NETDEV_PROMISC) {
            rte_eth_promiscuous_enable(dev->port_id);
        }

        if (!(dev->flags & NETDEV_UP)) {
            rte_eth_dev_stop(dev->port_id);
        }

        netdev_change_seq_changed(&dev->up);
    } else {
        /* If DPDK_DEV_VHOST device's NETDEV_UP flag was changed and vhost is
         * running then change netdev's change_seq to trigger link state
         * update. */

        if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
            && is_vhost_running(dev)) {
            netdev_change_seq_changed(&dev->up);

            /* Clear statistics if device is getting up. */
            if (NETDEV_UP & on) {
                rte_spinlock_lock(&dev->stats_lock);
                memset(&dev->stats, 0, sizeof dev->stats);
                rte_spinlock_unlock(&dev->stats_lock);
            }
        }
    }

    return 0;
}
static int
netdev_dpdk_update_flags(struct netdev *netdev,
                         enum netdev_flags off, enum netdev_flags on,
                         enum netdev_flags *old_flagsp)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int error;

    ovs_mutex_lock(&dev->mutex);
    error = netdev_dpdk_update_flags__(dev, off, on, old_flagsp);
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_get_status(const struct netdev *netdev, struct smap *args)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct rte_eth_dev_info dev_info;

    if (!rte_eth_dev_is_valid_port(dev->port_id)) {
        return ENODEV;
    }

    ovs_mutex_lock(&dev->mutex);
    rte_eth_dev_info_get(dev->port_id, &dev_info);
    ovs_mutex_unlock(&dev->mutex);

    smap_add_format(args, "port_no", "%d", dev->port_id);
    smap_add_format(args, "numa_id", "%d",
                    rte_eth_dev_socket_id(dev->port_id));
    smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
    smap_add_format(args, "min_rx_bufsize", "%u", dev_info.min_rx_bufsize);
    smap_add_format(args, "max_rx_pktlen", "%u", dev->max_packet_len);
    smap_add_format(args, "max_rx_queues", "%u", dev_info.max_rx_queues);
    smap_add_format(args, "max_tx_queues", "%u", dev_info.max_tx_queues);
    smap_add_format(args, "max_mac_addrs", "%u", dev_info.max_mac_addrs);
    smap_add_format(args, "max_hash_mac_addrs", "%u",
                    dev_info.max_hash_mac_addrs);
    smap_add_format(args, "max_vfs", "%u", dev_info.max_vfs);
    smap_add_format(args, "max_vmdq_pools", "%u", dev_info.max_vmdq_pools);

    if (dev_info.pci_dev) {
        smap_add_format(args, "pci-vendor_id", "0x%x",
                        dev_info.pci_dev->id.vendor_id);
        smap_add_format(args, "pci-device_id", "0x%x",
                        dev_info.pci_dev->id.device_id);
    }

    return 0;
}
static void
netdev_dpdk_set_admin_state__(struct netdev_dpdk *dev, bool admin_state)
    OVS_REQUIRES(dev->mutex)
{
    enum netdev_flags old_flags;

    if (admin_state) {
        netdev_dpdk_update_flags__(dev, 0, NETDEV_UP, &old_flags);
    } else {
        netdev_dpdk_update_flags__(dev, NETDEV_UP, 0, &old_flags);
    }
}
static void
netdev_dpdk_set_admin_state(struct unixctl_conn *conn, int argc,
                            const char *argv[], void *aux OVS_UNUSED)
{
    bool up;

    if (!strcasecmp(argv[argc - 1], "up")) {
        up = true;
    } else if (!strcasecmp(argv[argc - 1], "down")) {
        up = false;
    } else {
        unixctl_command_reply_error(conn, "Invalid Admin State");
        return;
    }

    if (argc > 2) {
        struct netdev *netdev = netdev_from_name(argv[1]);
        if (netdev && is_dpdk_class(netdev->netdev_class)) {
            struct netdev_dpdk *dpdk_dev = netdev_dpdk_cast(netdev);

            ovs_mutex_lock(&dpdk_dev->mutex);
            netdev_dpdk_set_admin_state__(dpdk_dev, up);
            ovs_mutex_unlock(&dpdk_dev->mutex);

            netdev_close(netdev);
        } else {
            unixctl_command_reply_error(conn, "Not a DPDK Interface");
            netdev_close(netdev);
            return;
        }
    } else {
        struct netdev_dpdk *netdev;

        ovs_mutex_lock(&dpdk_mutex);
        LIST_FOR_EACH (netdev, list_node, &dpdk_list) {
            ovs_mutex_lock(&netdev->mutex);
            netdev_dpdk_set_admin_state__(netdev, up);
            ovs_mutex_unlock(&netdev->mutex);
        }
        ovs_mutex_unlock(&dpdk_mutex);
    }
    unixctl_command_reply(conn, "OK");
}
/*
 * Set virtqueue flags so that we do not receive interrupts.
 */
static void
set_irq_status(int vid)
{
    uint32_t i;
    uint64_t idx;

    for (i = 0; i < rte_vhost_get_queue_num(vid); i++) {
        idx = i * VIRTIO_QNUM;
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_RXQ, 0);
        rte_vhost_enable_guest_notification(vid, idx + VIRTIO_TXQ, 0);
    }
}
/*
 * Fixes mapping for vhost-user tx queues.  Must be called after each
 * enabling/disabling of queues and n_txq modifications.
 */
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int *enabled_queues, n_enabled = 0;
    int i, k, total_txqs = dev->up.n_txq;

    enabled_queues = xcalloc(total_txqs, sizeof *enabled_queues);

    for (i = 0; i < total_txqs; i++) {
        /* Enabled queues always mapped to themselves. */
        if (dev->tx_q[i].map == i) {
            enabled_queues[n_enabled++] = i;
        }
    }

    if (n_enabled == 0 && total_txqs != 0) {
        enabled_queues[0] = OVS_VHOST_QUEUE_DISABLED;
        n_enabled = 1;
    }

    k = 0;
    for (i = 0; i < total_txqs; i++) {
        if (dev->tx_q[i].map != i) {
            dev->tx_q[i].map = enabled_queues[k];
            k = (k + 1) % n_enabled;
        }
    }

    VLOG_DBG("TX queue mapping for %s\n", dev->vhost_id);
    for (i = 0; i < total_txqs; i++) {
        VLOG_DBG("%2d --> %2d", i, dev->tx_q[i].map);
    }

    free(enabled_queues);
}
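/* Worked example: with total_txqs == 4 and queue 2 disabled by the guest,
 * enabled_queues becomes [0, 1, 3] and the resulting map is
 * 0->0, 1->1, 2->0, 3->3, so traffic destined for the disabled queue is
 * redistributed round-robin across the enabled ones. */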
/*
 * A new virtio-net device is added to a vhost port.
 */
static int
new_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int newnode = 0;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    /* Add device to the vhost port with the same name as that passed down. */
    LIST_FOR_EACH(dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            uint32_t qp_num = rte_vhost_get_queue_num(vid);

            /* Get NUMA information. */
            newnode = rte_vhost_get_numa_node(vid);
            if (newnode == -1) {
                VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
                          ifname);
                newnode = dev->socket_id;
            }

            if (dev->requested_n_txq != qp_num
                || dev->requested_n_rxq != qp_num
                || dev->requested_socket_id != newnode) {
                dev->requested_socket_id = newnode;
                dev->requested_n_rxq = qp_num;
                dev->requested_n_txq = qp_num;
                netdev_request_reconfigure(&dev->up);
            } else {
                /* Reconfiguration not required. */
                dev->vhost_reconfigured = true;
            }

            ovsrcu_index_set(&dev->vid, vid);
            exists = true;

            /* Disable notifications. */
            set_irq_status(vid);
            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (!exists) {
        VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);
        return -1;
    }

    VLOG_INFO("vHost Device '%s' has been added on numa node %i",
              ifname, newnode);

    return 0;
}
/* Clears mapping for all available queues of vhost interface. */
static void
netdev_dpdk_txq_map_clear(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    int i;

    for (i = 0; i < dev->up.n_txq; i++) {
        dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
    }
}
/*
 * Remove a virtio-net device from the specific vhost port.  Use dev->remove
 * flag to stop any more packets from being sent or received to/from a VM and
 * ensure all currently queued packets have been sent/received before removing
 * the device.
 */
static void
destroy_device(int vid)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        if (netdev_dpdk_get_vid(dev) == vid) {

            ovs_mutex_lock(&dev->mutex);
            dev->vhost_reconfigured = false;
            ovsrcu_index_set(&dev->vid, -1);
            netdev_dpdk_txq_map_clear(dev);

            netdev_change_seq_changed(&dev->up);
            ovs_mutex_unlock(&dev->mutex);
            exists = true;
            break;
        }
    }

    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        /*
         * Wait for other threads to quiesce after setting the 'virtio_dev'
         * to NULL, before returning.
         */
        ovsrcu_synchronize();

        /*
         * As call to ovsrcu_synchronize() will end the quiescent state,
         * put thread back into quiescent state before returning.
         */
        ovsrcu_quiesce_start();
        VLOG_INFO("vHost Device '%s' has been removed", ifname);
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
    }
}
static int
vring_state_changed(int vid, uint16_t queue_id, int enable)
{
    struct netdev_dpdk *dev;
    bool exists = false;
    int qid = queue_id / VIRTIO_QNUM;
    char ifname[IF_NAME_SZ];

    rte_vhost_get_ifname(vid, ifname, sizeof ifname);

    if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
        return 0;
    }

    ovs_mutex_lock(&dpdk_mutex);
    LIST_FOR_EACH (dev, list_node, &dpdk_list) {
        ovs_mutex_lock(&dev->mutex);
        if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
            if (enable) {
                dev->tx_q[qid].map = qid;
            } else {
                dev->tx_q[qid].map = OVS_VHOST_QUEUE_DISABLED;
            }
            netdev_dpdk_remap_txqs(dev);
            exists = true;
            ovs_mutex_unlock(&dev->mutex);
            break;
        }
        ovs_mutex_unlock(&dev->mutex);
    }
    ovs_mutex_unlock(&dpdk_mutex);

    if (exists) {
        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' "
                  "changed to \'%s\'", queue_id, qid, ifname,
                  (enable == 1) ? "enabled" : "disabled");
    } else {
        VLOG_INFO("vHost Device '%s' not found", ifname);
        return -1;
    }

    return 0;
}
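/* vring indices interleave the two directions: queue_id == qid * VIRTIO_QNUM
 * + ring, where ring is VIRTIO_RXQ (0) or VIRTIO_TXQ (1) from the guest's
 * point of view.  E.g. queue_id 4 is the guest-RX ring of queue pair 2,
 * i.e. the host TX queue toggled here; guest-TX rings are skipped above
 * because the host RX side needs no remapping. */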
int
netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
{
    return ovsrcu_index_get(&dev->vid);
}

struct ingress_policer *
netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
{
    return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
}
/*
 * These callbacks allow virtio-net devices to be added to vhost ports when
 * configuration has been fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops =
{
    .new_device = new_device,
    .destroy_device = destroy_device,
    .vring_state_changed = vring_state_changed,
};

static void *
start_vhost_loop(void *dummy OVS_UNUSED)
{
    pthread_detach(pthread_self());
    /* Put the vhost thread into quiescent state. */
    ovsrcu_quiesce_start();
    rte_vhost_driver_session_start();
    return NULL;
}
static int
netdev_dpdk_class_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    /* This function can be called for different classes.  The initialization
     * needs to be done only once. */
    if (ovsthread_once_start(&once)) {
        ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
        unixctl_command_register("netdev-dpdk/set-admin-state",
                                 "[netdev] up|down", 1, 2,
                                 netdev_dpdk_set_admin_state, NULL);

        ovsthread_once_done(&once);
    }

    return 0;
}
static int
netdev_dpdk_vhost_class_init(void)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;

    /* This function can be called for different classes.  The initialization
     * needs to be done only once. */
    if (ovsthread_once_start(&once)) {
        rte_vhost_driver_callback_register(&virtio_net_device_ops);
        rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4
                                  | 1ULL << VIRTIO_NET_F_HOST_TSO6
                                  | 1ULL << VIRTIO_NET_F_CSUM);
        ovs_thread_create("vhost_thread", start_vhost_loop, NULL);

        ovsthread_once_done(&once);
    }

    return 0;
}
static int
dpdk_ring_create(const char dev_name[], unsigned int port_no,
                 unsigned int *eth_port_id)
{
    struct dpdk_ring *ivshmem;
    char *ring_name;
    int err;

    ivshmem = dpdk_rte_mzalloc(sizeof *ivshmem);
    if (!ivshmem) {
        return ENOMEM;
    }

    /* XXX: Add support for multiqueue ring. */
    ring_name = xasprintf("%s_tx", dev_name);

    /* Create single producer tx ring, netdev does explicit locking. */
    ivshmem->cring_tx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SP_ENQ);
    free(ring_name);
    if (ivshmem->cring_tx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    ring_name = xasprintf("%s_rx", dev_name);

    /* Create single consumer rx ring, netdev does explicit locking. */
    ivshmem->cring_rx = rte_ring_create(ring_name, DPDK_RING_SIZE, SOCKET0,
                                        RING_F_SC_DEQ);
    free(ring_name);
    if (ivshmem->cring_rx == NULL) {
        rte_free(ivshmem);
        return ENOMEM;
    }

    err = rte_eth_from_rings(dev_name, &ivshmem->cring_rx, 1,
                             &ivshmem->cring_tx, 1, SOCKET0);
    if (err < 0) {
        rte_free(ivshmem);
        return ENODEV;
    }

    ivshmem->user_port_id = port_no;
    ivshmem->eth_port_id = rte_eth_dev_count() - 1;
    ovs_list_push_back(&dpdk_ring_list, &ivshmem->list_node);

    *eth_port_id = ivshmem->eth_port_id;
    return 0;
}
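/* E.g. creating "dpdkr0" makes a "dpdkr0_tx" and a "dpdkr0_rx" rte_ring and
 * wraps them into a single-queue ethdev via rte_eth_from_rings().  The
 * rings are single-producer/single-consumer because the netdev layer
 * serializes access with its own locking. */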
static int
dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
    OVS_REQUIRES(dpdk_mutex)
{
    struct dpdk_ring *ivshmem;
    unsigned int port_no;
    int err = 0;

    /* Names always start with "dpdkr". */
    err = dpdk_dev_parse_name(dev_name, "dpdkr", &port_no);
    if (err) {
        return err;
    }

    /* Look through our list to find the device. */
    LIST_FOR_EACH (ivshmem, list_node, &dpdk_ring_list) {
        if (ivshmem->user_port_id == port_no) {
            VLOG_INFO("Found dpdk ring device %s:", dev_name);
            /* Really all that is needed. */
            *eth_port_id = ivshmem->eth_port_id;
            return 0;
        }
    }
    /* Need to create the device rings. */
    return dpdk_ring_create(dev_name, port_no, eth_port_id);
}
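/* dpdk_dev_parse_name() strips the "dpdkr" prefix, so a port named "dpdkr7"
 * yields port_no == 7.  An existing ring with that user port id is reused;
 * otherwise a fresh pair of rings is created above. */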
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
                      struct dp_packet_batch *batch, bool may_steal,
                      bool concurrent_txq)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    unsigned i;

    /* When using 'dpdkr' and sending to a DPDK ring, we want to ensure that
     * the rss hash field is clear.  This is because the same mbuf may be
     * modified by the consumer of the ring and returned to the datapath
     * without the RSS hash being recalculated. */
    for (i = 0; i < batch->count; i++) {
        dp_packet_rss_invalidate(batch->packets[i]);
    }

    netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
    return 0;
}
static int
netdev_dpdk_ring_construct(struct netdev *netdev)
{
    unsigned int port_no = 0;
    int err = 0;

    ovs_mutex_lock(&dpdk_mutex);

    err = dpdk_ring_open(netdev->name, &port_no);
    if (err) {
        goto unlock_dpdk;
    }

    err = netdev_dpdk_init(netdev, port_no, DPDK_DEV_ETH);

unlock_dpdk:
    ovs_mutex_unlock(&dpdk_mutex);
    return err;
}
/*
 * Initialize QoS configuration operations.
 */
static void
qos_conf_init(struct qos_conf *conf, const struct dpdk_qos_ops *ops)
{
    conf->ops = ops;
    rte_spinlock_init(&conf->lock);
}
/*
 * Search existing QoS operations in qos_ops and compare each set of
 * operations qos_name to name.  Return a dpdk_qos_ops pointer to a match,
 * or NULL if no match is found.
 */
static const struct dpdk_qos_ops *
qos_lookup_name(const char *name)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (!strcmp(name, ops->qos_name)) {
            return ops;
        }
    }

    return NULL;
}
static int
netdev_dpdk_get_qos_types(const struct netdev *netdev OVS_UNUSED,
                          struct sset *types)
{
    const struct dpdk_qos_ops *const *opsp;

    for (opsp = qos_confs; *opsp != NULL; opsp++) {
        const struct dpdk_qos_ops *ops = *opsp;
        if (ops->qos_construct && ops->qos_name[0] != '\0') {
            sset_add(types, ops->qos_name);
        }
    }

    return 0;
}
static int
netdev_dpdk_get_qos(const struct netdev *netdev,
                    const char **typep, struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    struct qos_conf *qos_conf;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);
    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);
    if (qos_conf) {
        *typep = qos_conf->ops->qos_name;
        error = (qos_conf->ops->qos_get
                 ? qos_conf->ops->qos_get(qos_conf, details) : 0);
    } else {
        /* No QoS configuration set, return an empty string. */
        *typep = "";
    }
    ovs_mutex_unlock(&dev->mutex);

    return error;
}
static int
netdev_dpdk_set_qos(struct netdev *netdev, const char *type,
                    const struct smap *details)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    const struct dpdk_qos_ops *new_ops = NULL;
    struct qos_conf *qos_conf, *new_qos_conf = NULL;
    int error = 0;

    ovs_mutex_lock(&dev->mutex);

    qos_conf = ovsrcu_get_protected(struct qos_conf *, &dev->qos_conf);

    new_ops = qos_lookup_name(type);

    if (!new_ops || !new_ops->qos_construct) {
        new_qos_conf = NULL;
        if (type && type[0]) {
            error = EOPNOTSUPP;
        }
    } else if (qos_conf && qos_conf->ops == new_ops
               && qos_conf->ops->qos_is_equal(qos_conf, details)) {
        new_qos_conf = qos_conf;
    } else {
        error = new_ops->qos_construct(details, &new_qos_conf);
    }

    if (error) {
        VLOG_ERR("Failed to set QoS type %s on port %s: %s",
                 type, netdev->name, rte_strerror(error));
    }

    if (new_qos_conf != qos_conf) {
        ovsrcu_set(&dev->qos_conf, new_qos_conf);
        if (qos_conf) {
            ovsrcu_postpone(qos_conf->ops->qos_destruct, qos_conf);
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return error;
}
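/* PMD threads read dev->qos_conf with ovsrcu_get() and never take
 * dev->mutex on the fast path, so a replaced configuration must outlive
 * any in-flight readers: ovsrcu_postpone() defers qos_destruct() until all
 * threads have quiesced. */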
/* egress-policer details */

struct egress_policer {
    struct qos_conf qos_conf;
    struct rte_meter_srtcm_params app_srtcm_params;
    struct rte_meter_srtcm egress_meter;
};
static void
egress_policer_details_to_param(const struct smap *details,
                                struct rte_meter_srtcm_params *params)
{
    memset(params, 0, sizeof *params);
    params->cir = smap_get_ullong(details, "cir", 0);
    params->cbs = smap_get_ullong(details, "cbs", 0);
    params->ebs = 0;
}
static int
egress_policer_qos_construct(const struct smap *details,
                             struct qos_conf **conf)
{
    struct egress_policer *policer;
    int err = 0;

    policer = xmalloc(sizeof *policer);
    qos_conf_init(&policer->qos_conf, &egress_policer_ops);
    egress_policer_details_to_param(details, &policer->app_srtcm_params);
    err = rte_meter_srtcm_config(&policer->egress_meter,
                                 &policer->app_srtcm_params);
    if (!err) {
        *conf = &policer->qos_conf;
    } else {
        free(policer);
        *conf = NULL;
        err = -err;
    }

    return err;
}
static void
egress_policer_qos_destruct(struct qos_conf *conf)
{
    struct egress_policer *policer = CONTAINER_OF(conf, struct egress_policer,
                                                  qos_conf);
    free(policer);
}
static int
egress_policer_qos_get(const struct qos_conf *conf, struct smap *details)
{
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);

    smap_add_format(details, "cir", "%"PRIu64,
                    policer->app_srtcm_params.cir);
    smap_add_format(details, "cbs", "%"PRIu64,
                    policer->app_srtcm_params.cbs);

    return 0;
}
static bool
egress_policer_qos_is_equal(const struct qos_conf *conf,
                            const struct smap *details)
{
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);
    struct rte_meter_srtcm_params params;

    egress_policer_details_to_param(details, &params);

    return !memcmp(&params, &policer->app_srtcm_params, sizeof params);
}
static int
egress_policer_run(struct qos_conf *conf, struct rte_mbuf **pkts, int pkt_cnt)
{
    int cnt = 0;
    struct egress_policer *policer =
        CONTAINER_OF(conf, struct egress_policer, qos_conf);

    cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);

    return cnt;
}
static const struct dpdk_qos_ops egress_policer_ops = {
    "egress-policer",    /* qos_name */
    egress_policer_qos_construct,
    egress_policer_qos_destruct,
    egress_policer_qos_get,
    egress_policer_qos_is_equal,
    egress_policer_run
};
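/* An egress policer is attached through the QoS table; for example (the
 * port name is illustrative):
 *
 *   ovs-vsctl set port vhost-user0 qos=@newqos -- \
 *     --id=@newqos create qos type=egress-policer \
 *     other-config:cir=46000 other-config:cbs=2048
 *
 * where cir is the committed information rate in bytes/sec and cbs the
 * committed burst size in bytes, per the rte_meter srTCM semantics. */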
static int
netdev_dpdk_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err = 0;

    ovs_mutex_lock(&dev->mutex);

    if (netdev->n_txq == dev->requested_n_txq
        && netdev->n_rxq == dev->requested_n_rxq
        && dev->mtu == dev->requested_mtu
        && dev->rxq_size == dev->requested_rxq_size
        && dev->txq_size == dev->requested_txq_size) {
        /* Reconfiguration is unnecessary. */
        goto out;
    }

    rte_eth_dev_stop(dev->port_id);

    if (dev->mtu != dev->requested_mtu) {
        netdev_dpdk_mempool_configure(dev);
    }

    netdev->n_txq = dev->requested_n_txq;
    netdev->n_rxq = dev->requested_n_rxq;

    dev->rxq_size = dev->requested_rxq_size;
    dev->txq_size = dev->requested_txq_size;

    rte_free(dev->tx_q);
    err = dpdk_eth_dev_init(dev);
    dev->tx_q = netdev_dpdk_alloc_txq(netdev->n_txq);
    if (!dev->tx_q) {
        err = ENOMEM;
    }

    netdev_change_seq_changed(netdev);

out:
    ovs_mutex_unlock(&dev->mutex);
    return err;
}
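/* Reconfiguration follows the usual requested_* pattern: set_config() and
 * friends only record requested values and call
 * netdev_request_reconfigure(); the datapath later stops polling and calls
 * this function, so the device can be safely stopped and restarted here. */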
static void
dpdk_vhost_reconfigure_helper(struct netdev_dpdk *dev)
    OVS_REQUIRES(dev->mutex)
{
    dev->up.n_txq = dev->requested_n_txq;
    dev->up.n_rxq = dev->requested_n_rxq;

    /* Enable TX queue 0 by default if it wasn't disabled. */
    if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
        dev->tx_q[0].map = 0;
    }

    netdev_dpdk_remap_txqs(dev);

    if (dev->requested_socket_id != dev->socket_id
        || dev->requested_mtu != dev->mtu) {
        if (!netdev_dpdk_mempool_configure(dev)) {
            netdev_change_seq_changed(&dev->up);
        }
    }

    if (netdev_dpdk_get_vid(dev) >= 0) {
        dev->vhost_reconfigured = true;
    }
}
static int
netdev_dpdk_vhost_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

    ovs_mutex_lock(&dev->mutex);
    dpdk_vhost_reconfigure_helper(dev);
    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
static int
netdev_dpdk_vhost_client_reconfigure(struct netdev *netdev)
{
    struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
    int err;

    ovs_mutex_lock(&dev->mutex);

    dpdk_vhost_reconfigure_helper(dev);

    /* Configure vHost client mode if requested and if the following criteria
     * are met:
     *  1. Device hasn't been registered yet.
     *  2. A path has been specified.
     */
    if (!(dev->vhost_driver_flags & RTE_VHOST_USER_CLIENT)
            && strlen(dev->vhost_id)) {
        /* Register client-mode device. */
        err = rte_vhost_driver_register(dev->vhost_id,
                                        RTE_VHOST_USER_CLIENT);
        if (err) {
            VLOG_ERR("vhost-user device setup failure for device %s\n",
                     dev->vhost_id);
        } else {
            /* Configuration successful. */
            dev->vhost_driver_flags |= RTE_VHOST_USER_CLIENT;
            VLOG_INFO("vHost User device '%s' created in 'client' mode, "
                      "using client socket '%s'",
                      dev->up.name, dev->vhost_id);
        }
    }

    ovs_mutex_unlock(&dev->mutex);

    return 0;
}
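/* In client mode the socket is created by the VM side (e.g. by QEMU acting
 * as the vhost-user server), so registration is deferred until a socket
 * path has been supplied via options:vhost-server-path; server-mode
 * devices, by contrast, register their socket at construction time. */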
#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT,    \
                          SET_CONFIG, SET_TX_MULTIQ, SEND,    \
                          GET_CARRIER, GET_STATS,             \
                          GET_FEATURES, GET_STATUS,           \
                          RECONFIGURE, RXQ_RECV)              \
{                                                             \
    NAME,                                                     \
    true,                       /* is_pmd */                  \
    INIT,                       /* init */                    \
    NULL,                       /* netdev_dpdk_run */         \
    NULL,                       /* netdev_dpdk_wait */        \
                                                              \
    netdev_dpdk_alloc,                                        \
    CONSTRUCT,                                                \
    DESTRUCT,                                                 \
    netdev_dpdk_dealloc,                                      \
    netdev_dpdk_get_config,                                   \
    SET_CONFIG,                                               \
    NULL,                       /* get_tunnel_config */       \
    NULL,                       /* build header */            \
    NULL,                       /* push header */             \
    NULL,                       /* pop header */              \
    netdev_dpdk_get_numa_id,    /* get_numa_id */             \
    SET_TX_MULTIQ,                                            \
                                                              \
    SEND,                       /* send */                    \
    NULL,                       /* send_wait */               \
                                                              \
    netdev_dpdk_set_etheraddr,                                \
    netdev_dpdk_get_etheraddr,                                \
    netdev_dpdk_get_mtu,                                      \
    netdev_dpdk_set_mtu,                                      \
    netdev_dpdk_get_ifindex,                                  \
    GET_CARRIER,                                              \
    netdev_dpdk_get_carrier_resets,                           \
    netdev_dpdk_set_miimon,                                   \
    GET_STATS,                                                \
    GET_FEATURES,                                             \
    NULL,                       /* set_advertisements */      \
                                                              \
    netdev_dpdk_set_policing,                                 \
    netdev_dpdk_get_qos_types,                                \
    NULL,                       /* get_qos_capabilities */    \
    netdev_dpdk_get_qos,                                      \
    netdev_dpdk_set_qos,                                      \
    NULL,                       /* get_queue */               \
    NULL,                       /* set_queue */               \
    NULL,                       /* delete_queue */            \
    NULL,                       /* get_queue_stats */         \
    NULL,                       /* queue_dump_start */        \
    NULL,                       /* queue_dump_next */         \
    NULL,                       /* queue_dump_done */         \
    NULL,                       /* dump_queue_stats */        \
                                                              \
    NULL,                       /* set_in4 */                 \
    NULL,                       /* get_addr_list */           \
    NULL,                       /* add_router */              \
    NULL,                       /* get_next_hop */            \
    GET_STATUS,                                               \
    NULL,                       /* arp_lookup */              \
                                                              \
    netdev_dpdk_update_flags,                                 \
    RECONFIGURE,                                              \
                                                              \
    netdev_dpdk_rxq_alloc,                                    \
    netdev_dpdk_rxq_construct,                                \
    netdev_dpdk_rxq_destruct,                                 \
    netdev_dpdk_rxq_dealloc,                                  \
    RXQ_RECV,                                                 \
    NULL,                       /* rx_wait */                 \
    NULL,                       /* rxq_drain */               \
}
static const struct netdev_class dpdk_class =
    NETDEV_DPDK_CLASS(
        "dpdk",
        netdev_dpdk_class_init,
        netdev_dpdk_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_eth_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
    NETDEV_DPDK_CLASS(
        "dpdkr",
        netdev_dpdk_class_init,
        netdev_dpdk_ring_construct,
        netdev_dpdk_destruct,
        netdev_dpdk_ring_set_config,
        netdev_dpdk_set_tx_multiq,
        netdev_dpdk_ring_send,
        netdev_dpdk_get_carrier,
        netdev_dpdk_get_stats,
        netdev_dpdk_get_features,
        netdev_dpdk_get_status,
        netdev_dpdk_reconfigure,
        netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_vhost_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuser",
        netdev_dpdk_vhost_class_init,
        netdev_dpdk_vhost_construct,
        netdev_dpdk_vhost_destruct,
        NULL,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_reconfigure,
        netdev_dpdk_vhost_rxq_recv);
static const struct netdev_class dpdk_vhost_client_class =
    NETDEV_DPDK_CLASS(
        "dpdkvhostuserclient",
        netdev_dpdk_vhost_class_init,
        netdev_dpdk_vhost_client_construct,
        netdev_dpdk_vhost_destruct,
        netdev_dpdk_vhost_client_set_config,
        NULL,
        netdev_dpdk_vhost_send,
        netdev_dpdk_vhost_get_carrier,
        netdev_dpdk_vhost_get_stats,
        NULL,
        NULL,
        netdev_dpdk_vhost_client_reconfigure,
        netdev_dpdk_vhost_rxq_recv);
void
netdev_dpdk_register(void)
{
    netdev_register_provider(&dpdk_class);
    netdev_register_provider(&dpdk_ring_class);
    netdev_register_provider(&dpdk_vhost_class);
    netdev_register_provider(&dpdk_vhost_client_class);
}