#include "ovs-thread.h"
#include "ovs-rcu.h"
#include "packets.h"
-#include "shash.h"
+#include "openvswitch/shash.h"
#include "smap.h"
#include "sset.h"
#include "unaligned.h"
#include "rte_config.h"
#include "rte_mbuf.h"
#include "rte_meter.h"
+#ifdef DPDK_PDUMP
+#include "rte_pdump.h"
+#endif
#include "rte_virtio_net.h"
VLOG_DEFINE_THIS_MODULE(dpdk);
+ sizeof(struct dp_packet) \
+ RTE_PKTMBUF_HEADROOM)
#define NETDEV_DPDK_MBUF_ALIGN 1024
+#define NETDEV_DPDK_MAX_PKT_LEN 9728
/* Max and min number of packets in the mempool. OVS tries to allocate a
* mempool with MAX_NB_MBUF: if this fails (because the system doesn't have
#define OVS_VHOST_QUEUE_DISABLED (-2) /* Queue was disabled by guest and not
* yet mapped to another queue. */
-#ifdef VHOST_CUSE
-static char *cuse_dev_name = NULL; /* Character device cuse_dev_name. */
-#endif
static char *vhost_sock_dir = NULL; /* Location of vhost-user sockets */
-/*
- * Maximum amount of time in micro seconds to try and enqueue to vhost.
- */
-#define VHOST_ENQ_RETRY_USECS 100
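+/* Maximum number of times a partial or failed vhost enqueue is retried
+ * before the rest of the batch is dropped. */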
+#define VHOST_ENQ_RETRY_NUM 8
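+/* A vhost-user device name is a unix socket path, so the ifname buffer
+ * must be able to hold either a path or an interface name. */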
+#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
static const struct rte_eth_conf port_conf = {
.rxmode = {
},
};
-enum { MAX_TX_QUEUE_LEN = 384 };
enum { DPDK_RING_SIZE = 256 };
BUILD_ASSERT_DECL(IS_POW2(DPDK_RING_SIZE));
enum { DRAIN_TSC = 200000ULL };
= OVS_LIST_INITIALIZER(&dpdk_mp_list);
/* This mutex must be used by non pmd threads when allocating or freeing
- * mbufs through mempools. Since dpdk_queue_pkts() and dpdk_queue_flush() may
- * use mempools, a non pmd thread should hold this mutex while calling them */
+ * mbufs through mempools. */
static struct ovs_mutex nonpmd_mempool_mutex = OVS_MUTEX_INITIALIZER;
struct dpdk_mp {
/* There should be one 'struct dpdk_tx_queue' created for
* each cpu core. */
struct dpdk_tx_queue {
- bool flush_tx; /* Set to true to flush queue everytime */
- /* pkts are queued. */
- int count;
rte_spinlock_t tx_lock; /* Protects the members and the NIC queue
* from concurrent access. It is used only
* if the queue is shared among different
- * pmd threads (see 'txq_needs_locking'). */
+ * pmd threads (see 'concurrent_txq'). */
int map; /* Mapping of configured vhost-user queues
* to enabled by guest. */
- uint64_t tsc;
- struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
};
/* dpdk has no way to remove dpdk ring ethernet devices
struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
};
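+/* Ingress policer: a DPDK single-rate three-color meter (srTCM) used to
+ * rate-limit received traffic.  'policer_lock' serializes runs of the
+ * meter, since several rx queues may use it concurrently. */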
+struct ingress_policer {
+ struct rte_meter_srtcm_params app_srtcm_params;
+ struct rte_meter_srtcm in_policer;
+ rte_spinlock_t policer_lock;
+};
+
struct netdev_dpdk {
struct netdev up;
int port_id;
struct rte_eth_link link;
int link_reset_cnt;
- /* The user might request more txqs than the NIC has. We remap those
- * ('up.n_txq') on these ('real_n_txq').
- * If the numbers match, 'txq_needs_locking' is false, otherwise it is
- * true and we will take a spinlock on transmission */
- int real_n_txq;
- int real_n_rxq;
- bool txq_needs_locking;
+    /* virtio identifier for vhost devices.  Set to -1 when no device is
+     * attached; read by the datapath threads via RCU without any lock. */
+ ovsrcu_index vid;
- /* virtio-net structure for vhost device */
- OVSRCU_TYPE(struct virtio_net *) virtio_dev;
+ /* True if vHost device is 'up' and has been reconfigured at least once */
+ bool vhost_reconfigured;
- /* Identifier used to distinguish vhost devices from each other */
- char vhost_id[PATH_MAX];
+ /* Identifier used to distinguish vhost devices from each other. It does
+ * not change during the lifetime of a struct netdev_dpdk. It can be read
+ * without holding any mutex. */
+ const char vhost_id[PATH_MAX];
/* In dpdk_list. */
struct ovs_list list_node OVS_GUARDED_BY(dpdk_mutex);
struct qos_conf *qos_conf;
rte_spinlock_t qos_lock;
+ /* The following properties cannot be changed when a device is running,
+ * so we remember the request and update them next time
+ * netdev_dpdk*_reconfigure() is called */
+ int requested_mtu;
+ int requested_n_txq;
+ int requested_n_rxq;
+
+ /* Socket ID detected when vHost device is brought up */
+ int requested_socket_id;
+
+ /* Ingress Policer */
+ OVSRCU_TYPE(struct ingress_policer *) ingress_policer;
+ uint32_t policer_rate;
+ uint32_t policer_burst;
+
+ /* DPDK-ETH Flow control */
+ struct rte_eth_fc_conf fc_conf;
};
struct netdev_rxq_dpdk {
static int netdev_dpdk_construct(struct netdev *);
-struct virtio_net * netdev_dpdk_get_virtio(const struct netdev_dpdk *dev);
+int netdev_dpdk_get_vid(const struct netdev_dpdk *dev);
+
+struct ingress_policer *
+netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev);
static bool
is_dpdk_class(const struct netdev_class *class)
dmp->mtu = mtu;
dmp->refcount = 1;
mbp_priv.mbuf_data_room_size = MBUF_SIZE(mtu) - sizeof(struct dp_packet);
- mbp_priv.mbuf_priv_size = sizeof (struct dp_packet) - sizeof (struct rte_mbuf);
+ mbp_priv.mbuf_priv_size = sizeof (struct dp_packet)
+ - sizeof (struct rte_mbuf);
+    /* XXX: this is a really rough method of provisioning memory.
+     * It's impossible to determine what the exact memory requirements are
+     * when the number of ports and rxqs that utilize a particular mempool
+     * can change dynamically at runtime. For the moment, use this rough
+     * heuristic.
+     */
+ if (mtu >= ETHER_MTU) {
+ mp_size = MAX_NB_MBUF;
+ } else {
+ mp_size = MIN_NB_MBUF;
+ }
- mp_size = MAX_NB_MBUF;
do {
if (snprintf(mp_name, RTE_MEMPOOL_NAMESIZE, "ovs_mp_%d_%d_%u",
dmp->mtu, dmp->socket_id, mp_size) < 0) {
}
static void
-dpdk_mp_put(struct dpdk_mp *dmp)
+dpdk_mp_put(struct dpdk_mp *dmp) OVS_REQUIRES(dpdk_mutex)
{
-
if (!dmp) {
return;
}
- dmp->refcount--;
- ovs_assert(dmp->refcount >= 0);
+ ovs_assert(dmp->refcount);
-#if 0
- /* I could not find any API to destroy mp. */
- if (dmp->refcount == 0) {
- list_delete(dmp->list_node);
- /* destroy mp-pool. */
+ if (!--dmp->refcount) {
+ ovs_list_remove(&dmp->list_node);
+ rte_mempool_free(dmp->mp);
}
-#endif
+}
+
+/* Tries to allocate a new mempool on 'requested_socket_id' with an mbuf
+ * size corresponding to 'requested_mtu'.  On success the new configuration
+ * is applied; on error the device is left unchanged. */
+static int
+netdev_dpdk_mempool_configure(struct netdev_dpdk *dev)
+ OVS_REQUIRES(dpdk_mutex)
+ OVS_REQUIRES(dev->mutex)
+{
+ uint32_t buf_size = dpdk_buf_size(dev->requested_mtu);
+ struct dpdk_mp *mp;
+
+ mp = dpdk_mp_get(dev->requested_socket_id, FRAME_LEN_TO_MTU(buf_size));
+ if (!mp) {
+ VLOG_ERR("Insufficient memory to create memory pool for netdev "
+ "%s, with MTU %d on socket %d\n",
+ dev->up.name, dev->requested_mtu, dev->requested_socket_id);
+ return ENOMEM;
+ } else {
+ dpdk_mp_put(dev->dpdk_mp);
+ dev->dpdk_mp = mp;
+ dev->mtu = dev->requested_mtu;
+ dev->socket_id = dev->requested_socket_id;
+ dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
+ }
+
+ return 0;
}
static void
ovs_mutex_lock(&dpdk_mutex);
LIST_FOR_EACH (dev, list_node, &dpdk_list) {
ovs_mutex_lock(&dev->mutex);
- check_link_status(dev);
+ if (dev->type == DPDK_DEV_ETH) {
+ check_link_status(dev);
+ }
ovs_mutex_unlock(&dev->mutex);
}
ovs_mutex_unlock(&dpdk_mutex);
{
int diag = 0;
int i;
+ struct rte_eth_conf conf = port_conf;
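+    /* Size the rx path for jumbo frames when the MTU exceeds the standard
+     * Ethernet MTU; otherwise keep the device defaults. */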
+ if (dev->mtu > ETHER_MTU) {
+ conf.rxmode.jumbo_frame = 1;
+ conf.rxmode.max_rx_pkt_len = dev->max_packet_len;
+ } else {
+ conf.rxmode.jumbo_frame = 0;
+ conf.rxmode.max_rx_pkt_len = 0;
+ }
/* A device may report more queues than it makes available (this has
* been observed for Intel xl710, which reserves some of them for
* SRIOV): rte_eth_*_queue_setup will fail if a queue is not
VLOG_INFO("Retrying setup with (rxq:%d txq:%d)", n_rxq, n_txq);
}
- diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &port_conf);
+ diag = rte_eth_dev_configure(dev->port_id, n_rxq, n_txq, &conf);
if (diag) {
+ VLOG_WARN("Interface %s eth_dev setup error %s\n",
+ dev->up.name, rte_strerror(-diag));
break;
}
}
dev->up.n_rxq = n_rxq;
- dev->real_n_txq = n_txq;
+ dev->up.n_txq = n_txq;
return 0;
}
return diag;
}
+static void
+dpdk_eth_flow_ctrl_setup(struct netdev_dpdk *dev) OVS_REQUIRES(dev->mutex)
+{
+ if (rte_eth_dev_flow_ctrl_set(dev->port_id, &dev->fc_conf)) {
+ VLOG_WARN("Failed to enable flow control on device %d", dev->port_id);
+ }
+}
static int
dpdk_eth_dev_init(struct netdev_dpdk *dev) OVS_REQUIRES(dpdk_mutex)
int diag;
int n_rxq, n_txq;
- if (dev->port_id < 0 || dev->port_id >= rte_eth_dev_count()) {
+ if (!rte_eth_dev_is_valid_port(dev->port_id)) {
return ENODEV;
}
dev->buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
dev->flags = NETDEV_UP | NETDEV_PROMISC;
+
+ /* Get the Flow control configuration for DPDK-ETH */
+ diag = rte_eth_dev_flow_ctrl_get(dev->port_id, &dev->fc_conf);
+ if (diag) {
+ VLOG_DBG("cannot get flow control parameters on port=%d, err=%d",
+ dev->port_id, diag);
+ }
+
return 0;
}
dev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *dev->tx_q);
for (i = 0; i < n_txqs; i++) {
- int numa_id = ovs_numa_get_numa_id(i);
-
- if (!dev->txq_needs_locking) {
- /* Each index is considered as a cpu core id, since there should
- * be one tx queue for each cpu core. If the corresponding core
- * is not on the same numa node as 'dev', flags the
- * 'flush_tx'. */
- dev->tx_q[i].flush_tx = dev->socket_id == numa_id;
- } else {
- /* Queues are shared among CPUs. Always flush */
- dev->tx_q[i].flush_tx = true;
- }
-
/* Initialize map for vhost devices. */
dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
rte_spinlock_init(&dev->tx_q[i].tx_lock);
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
int sid;
int err = 0;
- uint32_t buf_size;
ovs_mutex_init(&dev->mutex);
ovs_mutex_lock(&dev->mutex);
}
dev->socket_id = sid < 0 ? SOCKET0 : sid;
+ dev->requested_socket_id = dev->socket_id;
dev->port_id = port_no;
dev->type = type;
dev->flags = 0;
- dev->mtu = ETHER_MTU;
+ dev->requested_mtu = dev->mtu = ETHER_MTU;
dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
+ ovsrcu_index_init(&dev->vid, -1);
+ dev->vhost_reconfigured = false;
- buf_size = dpdk_buf_size(dev->mtu);
- dev->dpdk_mp = dpdk_mp_get(dev->socket_id, FRAME_LEN_TO_MTU(buf_size));
- if (!dev->dpdk_mp) {
- err = ENOMEM;
+ err = netdev_dpdk_mempool_configure(dev);
+ if (err) {
goto unlock;
}
dev->qos_conf = NULL;
rte_spinlock_init(&dev->qos_lock);
- netdev->n_txq = NR_QUEUE;
+    /* Initialize rcu pointer for ingress policer to NULL */
+ ovsrcu_init(&dev->ingress_policer, NULL);
+ dev->policer_rate = 0;
+ dev->policer_burst = 0;
+
netdev->n_rxq = NR_QUEUE;
- netdev->requested_n_rxq = NR_QUEUE;
- dev->real_n_txq = NR_QUEUE;
+ netdev->n_txq = NR_QUEUE;
+ dev->requested_n_rxq = netdev->n_rxq;
+ dev->requested_n_txq = netdev->n_txq;
+    /* Initialize the flow control configuration to zero */
+ memset(&dev->fc_conf, 0, sizeof dev->fc_conf);
if (type == DPDK_DEV_ETH) {
- netdev_dpdk_alloc_txq(dev, NR_QUEUE);
err = dpdk_eth_dev_init(dev);
if (err) {
goto unlock;
}
+ netdev_dpdk_alloc_txq(dev, netdev->n_txq);
} else {
netdev_dpdk_alloc_txq(dev, OVS_VHOST_MAX_QUEUE_NUM);
+ /* Enable DPDK_DEV_VHOST device and set promiscuous mode flag. */
+ dev->flags = NETDEV_UP | NETDEV_PROMISC;
}
ovs_list_push_back(&dpdk_list, &dev->list_node);
unlock:
- if (err) {
- rte_free(dev->tx_q);
- }
ovs_mutex_unlock(&dev->mutex);
return err;
}
}
}
-static int
-vhost_construct_helper(struct netdev *netdev) OVS_REQUIRES(dpdk_mutex)
-{
- if (rte_eal_init_ret) {
- return rte_eal_init_ret;
- }
-
- return netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
-}
-
-static int
-netdev_dpdk_vhost_cuse_construct(struct netdev *netdev)
-{
- struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- int err;
-
- if (rte_eal_init_ret) {
- return rte_eal_init_ret;
- }
-
- ovs_mutex_lock(&dpdk_mutex);
- strncpy(dev->vhost_id, netdev->name, sizeof(dev->vhost_id));
- err = vhost_construct_helper(netdev);
- ovs_mutex_unlock(&dpdk_mutex);
- return err;
-}
-
static int
netdev_dpdk_vhost_user_construct(struct netdev *netdev)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
const char *name = netdev->name;
int err;
+ uint64_t flags = 0;
/* 'name' is appended to 'vhost_sock_dir' and used to create a socket in
* the file system. '/' or '\' would traverse directories, so they're not
/* Take the name of the vhost-user port and append it to the location where
* the socket is to be created, then register the socket.
*/
- snprintf(dev->vhost_id, sizeof(dev->vhost_id), "%s/%s",
+ snprintf(CONST_CAST(char *, dev->vhost_id), sizeof dev->vhost_id, "%s/%s",
vhost_sock_dir, name);
- err = rte_vhost_driver_register(dev->vhost_id);
+ err = rte_vhost_driver_register(dev->vhost_id, flags);
if (err) {
VLOG_ERR("vhost-user socket device setup failure for socket %s\n",
dev->vhost_id);
fatal_signal_add_file_to_unlink(dev->vhost_id);
VLOG_INFO("Socket %s created for vhost-user port %s\n",
dev->vhost_id, name);
- err = vhost_construct_helper(netdev);
+ err = netdev_dpdk_init(netdev, -1, DPDK_DEV_VHOST);
}
ovs_mutex_unlock(&dpdk_mutex);
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+ ovs_mutex_lock(&dpdk_mutex);
ovs_mutex_lock(&dev->mutex);
+
rte_eth_dev_stop(dev->port_id);
- ovs_mutex_unlock(&dev->mutex);
+ free(ovsrcu_get_protected(struct ingress_policer *,
+ &dev->ingress_policer));
- ovs_mutex_lock(&dpdk_mutex);
rte_free(dev->tx_q);
ovs_list_remove(&dev->list_node);
dpdk_mp_put(dev->dpdk_mp);
+
+ ovs_mutex_unlock(&dev->mutex);
ovs_mutex_unlock(&dpdk_mutex);
}
+/* rte_vhost_driver_unregister() can call back destroy_device(), which will
+ * try to acquire 'dpdk_mutex' and possibly 'dev->mutex'. To avoid a
+ * deadlock, none of the mutexes must be held while calling this function. */
+static int
+dpdk_vhost_driver_unregister(struct netdev_dpdk *dev)
+ OVS_EXCLUDED(dpdk_mutex)
+ OVS_EXCLUDED(dev->mutex)
+{
+ return rte_vhost_driver_unregister(dev->vhost_id);
+}
+
static void
netdev_dpdk_vhost_destruct(struct netdev *netdev)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+ ovs_mutex_lock(&dpdk_mutex);
+ ovs_mutex_lock(&dev->mutex);
+
/* Guest becomes an orphan if still attached. */
- if (netdev_dpdk_get_virtio(dev) != NULL) {
+ if (netdev_dpdk_get_vid(dev) >= 0) {
VLOG_ERR("Removing port '%s' while vhost device still attached.",
netdev->name);
VLOG_ERR("To restore connectivity after re-adding of port, VM on socket"
dev->vhost_id);
}
- if (rte_vhost_driver_unregister(dev->vhost_id)) {
- VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
- } else {
- fatal_signal_remove_file_to_unlink(dev->vhost_id);
- }
+ free(ovsrcu_get_protected(struct ingress_policer *,
+ &dev->ingress_policer));
- ovs_mutex_lock(&dpdk_mutex);
rte_free(dev->tx_q);
ovs_list_remove(&dev->list_node);
dpdk_mp_put(dev->dpdk_mp);
+
+ ovs_mutex_unlock(&dev->mutex);
ovs_mutex_unlock(&dpdk_mutex);
+
+ if (dpdk_vhost_driver_unregister(dev)) {
+ VLOG_ERR("Unable to remove vhost-user socket %s", dev->vhost_id);
+ } else {
+ fatal_signal_remove_file_to_unlink(dev->vhost_id);
+ }
}
static void
ovs_mutex_lock(&dev->mutex);
- smap_add_format(args, "requested_rx_queues", "%d", netdev->requested_n_rxq);
+ smap_add_format(args, "requested_rx_queues", "%d", dev->requested_n_rxq);
smap_add_format(args, "configured_rx_queues", "%d", netdev->n_rxq);
- smap_add_format(args, "requested_tx_queues", "%d", netdev->n_txq);
- smap_add_format(args, "configured_tx_queues", "%d", dev->real_n_txq);
- ovs_mutex_unlock(&dev->mutex);
-
- return 0;
-}
-
-static int
-netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
-{
- struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
-
- ovs_mutex_lock(&dev->mutex);
- netdev->requested_n_rxq = MAX(smap_get_int(args, "n_rxq",
- netdev->requested_n_rxq), 1);
- netdev_change_seq_changed(netdev);
+ smap_add_format(args, "requested_tx_queues", "%d", dev->requested_n_txq);
+ smap_add_format(args, "configured_tx_queues", "%d", netdev->n_txq);
+ smap_add_format(args, "mtu", "%d", dev->mtu);
ovs_mutex_unlock(&dev->mutex);
return 0;
}
-static int
-netdev_dpdk_get_numa_id(const struct netdev *netdev)
+static void
+dpdk_set_rxq_config(struct netdev_dpdk *dev, const struct smap *args)
{
- struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+ int new_n_rxq;
- return dev->socket_id;
+ new_n_rxq = MAX(smap_get_int(args, "n_rxq", dev->requested_n_rxq), 1);
+ if (new_n_rxq != dev->requested_n_rxq) {
+ dev->requested_n_rxq = new_n_rxq;
+ netdev_request_reconfigure(&dev->up);
+ }
}
-/* Sets the number of tx queues and rx queues for the dpdk interface.
- * If the configuration fails, do not try restoring its old configuration
- * and just returns the error. */
static int
-netdev_dpdk_set_multiq(struct netdev *netdev, unsigned int n_txq,
- unsigned int n_rxq)
+netdev_dpdk_set_config(struct netdev *netdev, const struct smap *args)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- int err = 0;
- int old_rxq, old_txq;
- if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {
- return err;
- }
-
- ovs_mutex_lock(&dpdk_mutex);
ovs_mutex_lock(&dev->mutex);
- rte_eth_dev_stop(dev->port_id);
+ dpdk_set_rxq_config(dev, args);
- old_txq = netdev->n_txq;
- old_rxq = netdev->n_rxq;
- netdev->n_txq = n_txq;
- netdev->n_rxq = n_rxq;
+ /* Flow control support is only available for DPDK Ethernet ports. */
+ bool rx_fc_en = false;
+ bool tx_fc_en = false;
+ enum rte_eth_fc_mode fc_mode_set[2][2] =
+ {{RTE_FC_NONE, RTE_FC_TX_PAUSE},
+ {RTE_FC_RX_PAUSE, RTE_FC_FULL}
+ };
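+    /* The mode table is indexed as [tx_fc_en][rx_fc_en]: with neither flag
+     * set the result is RTE_FC_NONE, with both it is RTE_FC_FULL. */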
+ rx_fc_en = smap_get_bool(args, "rx-flow-ctrl", false);
+ tx_fc_en = smap_get_bool(args, "tx-flow-ctrl", false);
+ dev->fc_conf.autoneg = smap_get_bool(args, "flow-ctrl-autoneg", false);
+ dev->fc_conf.mode = fc_mode_set[tx_fc_en][rx_fc_en];
- rte_free(dev->tx_q);
- err = dpdk_eth_dev_init(dev);
- netdev_dpdk_alloc_txq(dev, dev->real_n_txq);
- if (err) {
- /* If there has been an error, it means that the requested queues
- * have not been created. Restore the old numbers. */
- netdev->n_txq = old_txq;
- netdev->n_rxq = old_rxq;
- }
-
- dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;
+ dpdk_eth_flow_ctrl_setup(dev);
ovs_mutex_unlock(&dev->mutex);
- ovs_mutex_unlock(&dpdk_mutex);
- return err;
+ return 0;
}
static int
-netdev_dpdk_vhost_cuse_set_multiq(struct netdev *netdev, unsigned int n_txq,
- unsigned int n_rxq)
+netdev_dpdk_ring_set_config(struct netdev *netdev, const struct smap *args)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- int err = 0;
-
- if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {
- return err;
- }
- ovs_mutex_lock(&dpdk_mutex);
ovs_mutex_lock(&dev->mutex);
-
- netdev->n_txq = n_txq;
- dev->real_n_txq = 1;
- netdev->n_rxq = 1;
- dev->txq_needs_locking = dev->real_n_txq != netdev->n_txq;
-
+ dpdk_set_rxq_config(dev, args);
ovs_mutex_unlock(&dev->mutex);
- ovs_mutex_unlock(&dpdk_mutex);
- return err;
+ return 0;
}
static int
-netdev_dpdk_vhost_set_multiq(struct netdev *netdev, unsigned int n_txq,
- unsigned int n_rxq)
+netdev_dpdk_get_numa_id(const struct netdev *netdev)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- int err = 0;
- if (netdev->n_txq == n_txq && netdev->n_rxq == n_rxq) {
- return err;
- }
+ return dev->socket_id;
+}
+
+/* Sets the number of tx queues for the dpdk interface. */
+static int
+netdev_dpdk_set_tx_multiq(struct netdev *netdev, unsigned int n_txq)
+{
+ struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- ovs_mutex_lock(&dpdk_mutex);
ovs_mutex_lock(&dev->mutex);
- netdev->n_txq = n_txq;
- netdev->n_rxq = n_rxq;
+ if (dev->requested_n_txq == n_txq) {
+ goto out;
+ }
- ovs_mutex_unlock(&dev->mutex);
- ovs_mutex_unlock(&dpdk_mutex);
+ dev->requested_n_txq = n_txq;
+ netdev_request_reconfigure(netdev);
- return err;
+out:
+ ovs_mutex_unlock(&dev->mutex);
+ return 0;
}
static struct netdev_rxq *
}
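+/* Transmits 'cnt' mbufs on NIC queue 'qid', retrying while the device keeps
+ * accepting packets.  Any packets that cannot be transmitted are freed and
+ * counted as tx_dropped. */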
static inline void
-dpdk_queue_flush__(struct netdev_dpdk *dev, int qid)
+netdev_dpdk_eth_tx_burst(struct netdev_dpdk *dev, int qid,
+ struct rte_mbuf **pkts, int cnt)
{
- struct dpdk_tx_queue *txq = &dev->tx_q[qid];
uint32_t nb_tx = 0;
- while (nb_tx != txq->count) {
+ while (nb_tx != cnt) {
uint32_t ret;
- ret = rte_eth_tx_burst(dev->port_id, qid, txq->burst_pkts + nb_tx,
- txq->count - nb_tx);
+ ret = rte_eth_tx_burst(dev->port_id, qid, pkts + nb_tx, cnt - nb_tx);
if (!ret) {
break;
}
nb_tx += ret;
}
- if (OVS_UNLIKELY(nb_tx != txq->count)) {
+ if (OVS_UNLIKELY(nb_tx != cnt)) {
/* free buffers, which we couldn't transmit, one at a time (each
* packet could come from a different mempool) */
int i;
- for (i = nb_tx; i < txq->count; i++) {
- rte_pktmbuf_free(txq->burst_pkts[i]);
+ for (i = nb_tx; i < cnt; i++) {
+ rte_pktmbuf_free(pkts[i]);
}
rte_spinlock_lock(&dev->stats_lock);
- dev->stats.tx_dropped += txq->count-nb_tx;
+ dev->stats.tx_dropped += cnt - nb_tx;
rte_spinlock_unlock(&dev->stats_lock);
}
+}
+
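+/* Returns true if the packet conforms to the meter (colored green by the
+ * color-blind srTCM check).  The Ethernet header is excluded from the
+ * metered length. */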
+static inline bool
+netdev_dpdk_policer_pkt_handle(struct rte_meter_srtcm *meter,
+ struct rte_mbuf *pkt, uint64_t time)
+{
+ uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);
- txq->count = 0;
- txq->tsc = rte_get_timer_cycles();
+ return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
+ e_RTE_METER_GREEN;
}
-static inline void
-dpdk_queue_flush(struct netdev_dpdk *dev, int qid)
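+/* Runs the srTCM meter over 'pkts', compacting the conforming packets to
+ * the front of the array and freeing the rest.  Returns the number of
+ * packets kept. */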
+static int
+netdev_dpdk_policer_run(struct rte_meter_srtcm *meter,
+ struct rte_mbuf **pkts, int pkt_cnt)
{
- struct dpdk_tx_queue *txq = &dev->tx_q[qid];
+ int i = 0;
+ int cnt = 0;
+ struct rte_mbuf *pkt = NULL;
+ uint64_t current_time = rte_rdtsc();
- if (txq->count == 0) {
- return;
+ for (i = 0; i < pkt_cnt; i++) {
+ pkt = pkts[i];
+ /* Handle current packet */
+ if (netdev_dpdk_policer_pkt_handle(meter, pkt, current_time)) {
+ if (cnt != i) {
+ pkts[cnt] = pkt;
+ }
+ cnt++;
+ } else {
+ rte_pktmbuf_free(pkt);
+ }
}
- dpdk_queue_flush__(dev, qid);
+
+ return cnt;
+}
+
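+/* Applies the ingress policer under its spinlock; called from the rx path
+ * of both DPDK and vhost devices. */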
+static int
+ingress_policer_run(struct ingress_policer *policer, struct rte_mbuf **pkts,
+ int pkt_cnt)
+{
+ int cnt = 0;
+
+ rte_spinlock_lock(&policer->policer_lock);
+ cnt = netdev_dpdk_policer_run(&policer->in_policer, pkts, pkt_cnt);
+ rte_spinlock_unlock(&policer->policer_lock);
+
+ return cnt;
}
static bool
-is_vhost_running(struct virtio_net *virtio_dev)
+is_vhost_running(struct netdev_dpdk *dev)
{
- return (virtio_dev != NULL && (virtio_dev->flags & VIRTIO_DEV_RUNNING));
+ return (netdev_dpdk_get_vid(dev) >= 0 && dev->vhost_reconfigured);
}
static inline void
static inline void
netdev_dpdk_vhost_update_rx_counters(struct netdev_stats *stats,
- struct dp_packet **packets, int count)
+ struct dp_packet **packets, int count,
+ int dropped)
{
int i;
unsigned int packet_size;
struct dp_packet *packet;
stats->rx_packets += count;
+ stats->rx_dropped += dropped;
for (i = 0; i < count; i++) {
packet = packets[i];
packet_size = dp_packet_size(packet);
*/
static int
netdev_dpdk_vhost_rxq_recv(struct netdev_rxq *rxq,
- struct dp_packet **packets, int *c)
+ struct dp_packet_batch *batch)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
- struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
int qid = rxq->queue_id;
+ struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
uint16_t nb_rx = 0;
+ uint16_t dropped = 0;
- if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
+ if (OVS_UNLIKELY(!is_vhost_running(dev)
+ || !(dev->flags & NETDEV_UP))) {
return EAGAIN;
}
- if (rxq->queue_id >= dev->real_n_rxq) {
- return EOPNOTSUPP;
- }
-
- nb_rx = rte_vhost_dequeue_burst(virtio_dev, qid * VIRTIO_QNUM + VIRTIO_TXQ,
+ nb_rx = rte_vhost_dequeue_burst(netdev_dpdk_get_vid(dev),
+ qid * VIRTIO_QNUM + VIRTIO_TXQ,
dev->dpdk_mp->mp,
- (struct rte_mbuf **)packets,
+ (struct rte_mbuf **) batch->packets,
NETDEV_MAX_BURST);
if (!nb_rx) {
return EAGAIN;
}
+ if (policer) {
+ dropped = nb_rx;
+ nb_rx = ingress_policer_run(policer,
+ (struct rte_mbuf **) batch->packets,
+ nb_rx);
+ dropped -= nb_rx;
+ }
+
rte_spinlock_lock(&dev->stats_lock);
- netdev_dpdk_vhost_update_rx_counters(&dev->stats, packets, nb_rx);
+ netdev_dpdk_vhost_update_rx_counters(&dev->stats, batch->packets,
+ nb_rx, dropped);
rte_spinlock_unlock(&dev->stats_lock);
- *c = (int) nb_rx;
+ batch->count = (int) nb_rx;
return 0;
}
static int
-netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet **packets,
- int *c)
+netdev_dpdk_rxq_recv(struct netdev_rxq *rxq, struct dp_packet_batch *batch)
{
struct netdev_rxq_dpdk *rx = netdev_rxq_dpdk_cast(rxq);
struct netdev_dpdk *dev = netdev_dpdk_cast(rxq->netdev);
+ struct ingress_policer *policer = netdev_dpdk_get_ingress_policer(dev);
int nb_rx;
-
- /* There is only one tx queue for this core. Do not flush other
- * queues.
- * Do not flush tx queue which is shared among CPUs
- * since it is always flushed */
- if (rxq->queue_id == rte_lcore_id() &&
- OVS_LIKELY(!dev->txq_needs_locking)) {
- dpdk_queue_flush(dev, rxq->queue_id);
- }
+ int dropped = 0;
nb_rx = rte_eth_rx_burst(rx->port_id, rxq->queue_id,
- (struct rte_mbuf **) packets,
+ (struct rte_mbuf **) batch->packets,
NETDEV_MAX_BURST);
if (!nb_rx) {
return EAGAIN;
}
- *c = nb_rx;
+ if (policer) {
+ dropped = nb_rx;
+ nb_rx = ingress_policer_run(policer,
+ (struct rte_mbuf **)batch->packets,
+ nb_rx);
+ dropped -= nb_rx;
+ }
+
+ /* Update stats to reflect dropped packets */
+ if (OVS_UNLIKELY(dropped)) {
+ rte_spinlock_lock(&dev->stats_lock);
+ dev->stats.rx_dropped += dropped;
+ rte_spinlock_unlock(&dev->stats_lock);
+ }
+
+ batch->count = nb_rx;
return 0;
}
static void
__netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
- struct dp_packet **pkts, int cnt,
- bool may_steal)
+ struct dp_packet **pkts, int cnt)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
struct rte_mbuf **cur_pkts = (struct rte_mbuf **) pkts;
unsigned int total_pkts = cnt;
- unsigned int qos_pkts = cnt;
- uint64_t start = 0;
+ unsigned int qos_pkts = 0;
+ unsigned int mtu_dropped = 0;
+ int i, retries = 0;
- qid = dev->tx_q[qid % dev->real_n_txq].map;
+ qid = dev->tx_q[qid % netdev->n_txq].map;
- if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid < 0)) {
+ if (OVS_UNLIKELY(!is_vhost_running(dev) || qid < 0
+ || !(dev->flags & NETDEV_UP))) {
rte_spinlock_lock(&dev->stats_lock);
dev->stats.tx_dropped+= cnt;
rte_spinlock_unlock(&dev->stats_lock);
/* Check has QoS has been configured for the netdev */
cnt = netdev_dpdk_qos_run__(dev, cur_pkts, cnt);
- qos_pkts -= cnt;
+ qos_pkts = total_pkts - cnt;
do {
int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
unsigned int tx_pkts;
+ unsigned int try_tx_pkts = cnt;
- tx_pkts = rte_vhost_enqueue_burst(virtio_dev, vhost_qid,
- cur_pkts, cnt);
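+        /* Transmit only up to the first oversized packet; if the first
+         * packet itself exceeds the MTU, drop it and continue with the
+         * rest of the batch. */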
+ for (i = 0; i < cnt; i++) {
+ if (cur_pkts[i]->pkt_len > dev->max_packet_len) {
+ try_tx_pkts = i;
+ break;
+ }
+ }
+ if (!try_tx_pkts) {
+ cur_pkts++;
+ mtu_dropped++;
+ cnt--;
+ continue;
+ }
+ tx_pkts = rte_vhost_enqueue_burst(netdev_dpdk_get_vid(dev),
+ vhost_qid, cur_pkts, try_tx_pkts);
if (OVS_LIKELY(tx_pkts)) {
/* Packets have been sent.*/
cnt -= tx_pkts;
- /* Prepare for possible next iteration.*/
+ /* Prepare for possible retry.*/
cur_pkts = &cur_pkts[tx_pkts];
- } else {
- uint64_t timeout = VHOST_ENQ_RETRY_USECS * rte_get_timer_hz() / 1E6;
- unsigned int expired = 0;
-
- if (!start) {
- start = rte_get_timer_cycles();
- }
-
- /*
- * Unable to enqueue packets to vhost interface.
- * Check available entries before retrying.
- */
- while (!rte_vring_available_entries(virtio_dev, vhost_qid)) {
- if (OVS_UNLIKELY((rte_get_timer_cycles() - start) > timeout)) {
- expired = 1;
- break;
- }
- }
- if (expired) {
- /* break out of main loop. */
- break;
+ if (tx_pkts != try_tx_pkts) {
+ retries++;
}
+ } else {
+ /* No packets sent - do not retry.*/
+ break;
}
- } while (cnt);
+ } while (cnt && (retries <= VHOST_ENQ_RETRY_NUM));
rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
rte_spinlock_lock(&dev->stats_lock);
- cnt += qos_pkts;
- netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts, cnt);
+ netdev_dpdk_vhost_update_tx_counters(&dev->stats, pkts, total_pkts,
+ cnt + mtu_dropped + qos_pkts);
rte_spinlock_unlock(&dev->stats_lock);
out:
- if (may_steal) {
- int i;
-
- for (i = 0; i < total_pkts; i++) {
- dp_packet_delete(pkts[i]);
- }
- }
-}
-
-inline static void
-dpdk_queue_pkts(struct netdev_dpdk *dev, int qid,
- struct rte_mbuf **pkts, int cnt)
-{
- struct dpdk_tx_queue *txq = &dev->tx_q[qid];
- uint64_t diff_tsc;
-
- int i = 0;
-
- while (i < cnt) {
- int freeslots = MAX_TX_QUEUE_LEN - txq->count;
- int tocopy = MIN(freeslots, cnt-i);
-
- memcpy(&txq->burst_pkts[txq->count], &pkts[i],
- tocopy * sizeof (struct rte_mbuf *));
-
- txq->count += tocopy;
- i += tocopy;
-
- if (txq->count == MAX_TX_QUEUE_LEN || txq->flush_tx) {
- dpdk_queue_flush__(dev, qid);
- }
- diff_tsc = rte_get_timer_cycles() - txq->tsc;
- if (diff_tsc >= DRAIN_TSC) {
- dpdk_queue_flush__(dev, qid);
- }
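+    /* Packets dropped by the QoS policer were already freed inside
+     * netdev_dpdk_qos_run__(), so delete only the remaining packets here. */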
+ for (i = 0; i < total_pkts - qos_pkts; i++) {
+ dp_packet_delete(pkts[i]);
}
}
/* Tx function. Transmit packets indefinitely */
static void
-dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet **pkts,
- int cnt)
+dpdk_do_tx_copy(struct netdev *netdev, int qid, struct dp_packet_batch *batch)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
#if !defined(__CHECKER__) && !defined(_WIN32)
- const size_t PKT_ARRAY_SIZE = cnt;
+ const size_t PKT_ARRAY_SIZE = batch->count;
#else
/* Sparse or MSVC doesn't like variable length array. */
enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
ovs_mutex_lock(&nonpmd_mempool_mutex);
}
- for (i = 0; i < cnt; i++) {
- int size = dp_packet_size(pkts[i]);
+ dp_packet_batch_apply_cutlen(batch);
+
+ for (i = 0; i < batch->count; i++) {
+ int size = dp_packet_size(batch->packets[i]);
if (OVS_UNLIKELY(size > dev->max_packet_len)) {
VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
mbufs[newcnt] = rte_pktmbuf_alloc(dev->dpdk_mp->mp);
if (!mbufs[newcnt]) {
- dropped += cnt - i;
+ dropped += batch->count - i;
break;
}
/* We have to do a copy for now */
- memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *), dp_packet_data(pkts[i]), size);
+ memcpy(rte_pktmbuf_mtod(mbufs[newcnt], void *),
+ dp_packet_data(batch->packets[i]), size);
rte_pktmbuf_data_len(mbufs[newcnt]) = size;
rte_pktmbuf_pkt_len(mbufs[newcnt]) = size;
}
if (dev->type == DPDK_DEV_VHOST) {
- __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs, newcnt, true);
+ __netdev_dpdk_vhost_send(netdev, qid, (struct dp_packet **) mbufs,
+ newcnt);
} else {
unsigned int qos_pkts = newcnt;
newcnt = netdev_dpdk_qos_run__(dev, mbufs, newcnt);
dropped += qos_pkts - newcnt;
- dpdk_queue_pkts(dev, qid, mbufs, newcnt);
- dpdk_queue_flush(dev, qid);
+ netdev_dpdk_eth_tx_burst(dev, qid, mbufs, newcnt);
}
if (OVS_UNLIKELY(dropped)) {
}
static int
-netdev_dpdk_vhost_send(struct netdev *netdev, int qid, struct dp_packet **pkts,
- int cnt, bool may_steal)
+netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
+ struct dp_packet_batch *batch,
+ bool may_steal, bool concurrent_txq OVS_UNUSED)
{
- if (OVS_UNLIKELY(pkts[0]->source != DPBUF_DPDK)) {
- int i;
- dpdk_do_tx_copy(netdev, qid, pkts, cnt);
- if (may_steal) {
- for (i = 0; i < cnt; i++) {
- dp_packet_delete(pkts[i]);
- }
- }
+ if (OVS_UNLIKELY(!may_steal || batch->packets[0]->source != DPBUF_DPDK)) {
+ dpdk_do_tx_copy(netdev, qid, batch);
+ dp_packet_delete_batch(batch, may_steal);
} else {
- __netdev_dpdk_vhost_send(netdev, qid, pkts, cnt, may_steal);
+ dp_packet_batch_apply_cutlen(batch);
+ __netdev_dpdk_vhost_send(netdev, qid, batch->packets, batch->count);
}
return 0;
}
static inline void
netdev_dpdk_send__(struct netdev_dpdk *dev, int qid,
- struct dp_packet **pkts, int cnt, bool may_steal)
+ struct dp_packet_batch *batch, bool may_steal,
+ bool concurrent_txq)
{
- int i;
-
- if (OVS_UNLIKELY(dev->txq_needs_locking)) {
- qid = qid % dev->real_n_txq;
+ if (OVS_UNLIKELY(concurrent_txq)) {
+ qid = qid % dev->up.n_txq;
rte_spinlock_lock(&dev->tx_q[qid].tx_lock);
}
if (OVS_UNLIKELY(!may_steal ||
- pkts[0]->source != DPBUF_DPDK)) {
+ batch->packets[0]->source != DPBUF_DPDK)) {
struct netdev *netdev = &dev->up;
- dpdk_do_tx_copy(netdev, qid, pkts, cnt);
-
- if (may_steal) {
- for (i = 0; i < cnt; i++) {
- dp_packet_delete(pkts[i]);
- }
- }
+ dpdk_do_tx_copy(netdev, qid, batch);
+ dp_packet_delete_batch(batch, may_steal);
} else {
int next_tx_idx = 0;
int dropped = 0;
unsigned int qos_pkts = 0;
unsigned int temp_cnt = 0;
+ int cnt = batch->count;
- for (i = 0; i < cnt; i++) {
- int size = dp_packet_size(pkts[i]);
+ dp_packet_batch_apply_cutlen(batch);
+
+ for (int i = 0; i < cnt; i++) {
+ int size = dp_packet_size(batch->packets[i]);
if (OVS_UNLIKELY(size > dev->max_packet_len)) {
if (next_tx_idx != i) {
temp_cnt = i - next_tx_idx;
qos_pkts = temp_cnt;
- temp_cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf**)pkts,
- temp_cnt);
+ temp_cnt = netdev_dpdk_qos_run__(dev,
+ (struct rte_mbuf**)batch->packets,
+ temp_cnt);
dropped += qos_pkts - temp_cnt;
- dpdk_queue_pkts(dev, qid,
- (struct rte_mbuf **)&pkts[next_tx_idx],
- temp_cnt);
+ netdev_dpdk_eth_tx_burst(dev, qid,
+ (struct rte_mbuf **)&batch->packets[next_tx_idx],
+ temp_cnt);
}
VLOG_WARN_RL(&rl, "Too big size %d max_packet_len %d",
(int)size , dev->max_packet_len);
- dp_packet_delete(pkts[i]);
+ dp_packet_delete(batch->packets[i]);
dropped++;
next_tx_idx = i + 1;
}
cnt -= next_tx_idx;
qos_pkts = cnt;
- cnt = netdev_dpdk_qos_run__(dev, (struct rte_mbuf**)pkts, cnt);
+ cnt = netdev_dpdk_qos_run__(dev,
+ (struct rte_mbuf**)batch->packets, cnt);
dropped += qos_pkts - cnt;
- dpdk_queue_pkts(dev, qid, (struct rte_mbuf **)&pkts[next_tx_idx],
- cnt);
+ netdev_dpdk_eth_tx_burst(dev, qid,
+ (struct rte_mbuf **)&batch->packets[next_tx_idx],
+ cnt);
}
if (OVS_UNLIKELY(dropped)) {
}
}
- if (OVS_UNLIKELY(dev->txq_needs_locking)) {
+ if (OVS_UNLIKELY(concurrent_txq)) {
rte_spinlock_unlock(&dev->tx_q[qid].tx_lock);
}
}
static int
netdev_dpdk_eth_send(struct netdev *netdev, int qid,
- struct dp_packet **pkts, int cnt, bool may_steal)
+ struct dp_packet_batch *batch, bool may_steal,
+ bool concurrent_txq)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
+ netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
return 0;
}
}
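+/* MTU changes are deferred: the requested value is recorded here and
+ * applied the next time the device is reconfigured. */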
static int
-netdev_dpdk_set_mtu(const struct netdev *netdev, int mtu)
+netdev_dpdk_set_mtu(struct netdev *netdev, int mtu)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- int old_mtu, err, dpdk_mtu;
- struct dpdk_mp *old_mp;
- struct dpdk_mp *mp;
- uint32_t buf_size;
- ovs_mutex_lock(&dpdk_mutex);
- ovs_mutex_lock(&dev->mutex);
- if (dev->mtu == mtu) {
- err = 0;
- goto out;
- }
-
- buf_size = dpdk_buf_size(mtu);
- dpdk_mtu = FRAME_LEN_TO_MTU(buf_size);
-
- mp = dpdk_mp_get(dev->socket_id, dpdk_mtu);
- if (!mp) {
- err = ENOMEM;
- goto out;
+ if (MTU_TO_FRAME_LEN(mtu) > NETDEV_DPDK_MAX_PKT_LEN
+ || mtu < ETHER_MIN_MTU) {
+ VLOG_WARN("%s: unsupported MTU %d\n", dev->up.name, mtu);
+ return EINVAL;
}
- rte_eth_dev_stop(dev->port_id);
-
- old_mtu = dev->mtu;
- old_mp = dev->dpdk_mp;
- dev->dpdk_mp = mp;
- dev->mtu = mtu;
- dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
-
- err = dpdk_eth_dev_init(dev);
- if (err) {
- dpdk_mp_put(mp);
- dev->mtu = old_mtu;
- dev->dpdk_mp = old_mp;
- dev->max_packet_len = MTU_TO_FRAME_LEN(dev->mtu);
- dpdk_eth_dev_init(dev);
- goto out;
+ ovs_mutex_lock(&dev->mutex);
+ if (dev->requested_mtu != mtu) {
+ dev->requested_mtu = mtu;
+ netdev_request_reconfigure(netdev);
}
-
- dpdk_mp_put(old_mp);
- netdev_change_seq_changed(netdev);
-out:
ovs_mutex_unlock(&dev->mutex);
- ovs_mutex_unlock(&dpdk_mutex);
- return err;
+
+ return 0;
}
static int
/* Supported Stats */
stats->rx_packets += dev->stats.rx_packets;
stats->tx_packets += dev->stats.tx_packets;
+ stats->rx_dropped = dev->stats.rx_dropped;
stats->tx_dropped += dev->stats.tx_dropped;
stats->multicast = dev->stats.multicast;
stats->rx_bytes = dev->stats.rx_bytes;
static void
netdev_dpdk_convert_xstats(struct netdev_stats *stats,
- const struct rte_eth_xstats *xstats,
+ const struct rte_eth_xstat *xstats,
+ const struct rte_eth_xstat_name *names,
const unsigned int size)
{
- /* XXX Current implementation is simple search through an array
- * to find hardcoded counter names. In future DPDK release (TBD)
- * XSTATS API will change so each counter will be represented by
- * unique ID instead of String. */
-
for (unsigned int i = 0; i < size; i++) {
- if (strcmp(XSTAT_RX_64_PACKETS, xstats[i].name) == 0) {
+ if (strcmp(XSTAT_RX_64_PACKETS, names[i].name) == 0) {
stats->rx_1_to_64_packets = xstats[i].value;
- } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_RX_65_TO_127_PACKETS, names[i].name) == 0) {
stats->rx_65_to_127_packets = xstats[i].value;
- } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_RX_128_TO_255_PACKETS, names[i].name) == 0) {
stats->rx_128_to_255_packets = xstats[i].value;
- } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_RX_256_TO_511_PACKETS, names[i].name) == 0) {
stats->rx_256_to_511_packets = xstats[i].value;
- } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS,
- xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_RX_512_TO_1023_PACKETS, names[i].name) == 0) {
stats->rx_512_to_1023_packets = xstats[i].value;
- } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS,
- xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_RX_1024_TO_1522_PACKETS, names[i].name) == 0) {
stats->rx_1024_to_1522_packets = xstats[i].value;
- } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS,
- xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_RX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
stats->rx_1523_to_max_packets = xstats[i].value;
- } else if (strcmp(XSTAT_TX_64_PACKETS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_TX_64_PACKETS, names[i].name) == 0) {
stats->tx_1_to_64_packets = xstats[i].value;
- } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_TX_65_TO_127_PACKETS, names[i].name) == 0) {
stats->tx_65_to_127_packets = xstats[i].value;
- } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_TX_128_TO_255_PACKETS, names[i].name) == 0) {
stats->tx_128_to_255_packets = xstats[i].value;
- } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_TX_256_TO_511_PACKETS, names[i].name) == 0) {
stats->tx_256_to_511_packets = xstats[i].value;
- } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS,
- xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_TX_512_TO_1023_PACKETS, names[i].name) == 0) {
stats->tx_512_to_1023_packets = xstats[i].value;
- } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS,
- xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_TX_1024_TO_1522_PACKETS, names[i].name) == 0) {
stats->tx_1024_to_1522_packets = xstats[i].value;
- } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS,
- xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_TX_1523_TO_MAX_PACKETS, names[i].name) == 0) {
stats->tx_1523_to_max_packets = xstats[i].value;
- } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_TX_MULTICAST_PACKETS, names[i].name) == 0) {
stats->tx_multicast_packets = xstats[i].value;
- } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_RX_BROADCAST_PACKETS, names[i].name) == 0) {
stats->rx_broadcast_packets = xstats[i].value;
- } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_TX_BROADCAST_PACKETS, names[i].name) == 0) {
stats->tx_broadcast_packets = xstats[i].value;
- } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_RX_UNDERSIZED_ERRORS, names[i].name) == 0) {
stats->rx_undersized_errors = xstats[i].value;
- } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_RX_FRAGMENTED_ERRORS, names[i].name) == 0) {
stats->rx_fragmented_errors = xstats[i].value;
- } else if (strcmp(XSTAT_RX_JABBER_ERRORS, xstats[i].name) == 0) {
+ } else if (strcmp(XSTAT_RX_JABBER_ERRORS, names[i].name) == 0) {
stats->rx_jabber_errors = xstats[i].value;
}
}
netdev_dpdk_get_carrier(netdev, &gg);
ovs_mutex_lock(&dev->mutex);
- struct rte_eth_xstats *rte_xstats;
- int rte_xstats_len, rte_xstats_ret;
+ struct rte_eth_xstat *rte_xstats = NULL;
+ struct rte_eth_xstat_name *rte_xstats_names = NULL;
+ int rte_xstats_len, rte_xstats_new_len, rte_xstats_ret;
if (rte_eth_stats_get(dev->port_id, &rte_stats)) {
VLOG_ERR("Can't get ETH statistics for port: %i.", dev->port_id);
+ ovs_mutex_unlock(&dev->mutex);
return EPROTO;
}
- rte_xstats_len = rte_eth_xstats_get(dev->port_id, NULL, 0);
- if (rte_xstats_len > 0) {
- rte_xstats = dpdk_rte_mzalloc(sizeof(*rte_xstats) * rte_xstats_len);
- memset(rte_xstats, 0xff, sizeof(*rte_xstats) * rte_xstats_len);
- rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
- rte_xstats_len);
- if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
- netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_ret);
- }
- rte_free(rte_xstats);
+ /* Get length of statistics */
+ rte_xstats_len = rte_eth_xstats_get_names(dev->port_id, NULL, 0);
+ if (rte_xstats_len < 0) {
+        VLOG_WARN("Cannot get number of XSTATS for port: %i", dev->port_id);
+ goto out;
+ }
+ /* Reserve memory for xstats names and values */
+ rte_xstats_names = xcalloc(rte_xstats_len, sizeof *rte_xstats_names);
+ rte_xstats = xcalloc(rte_xstats_len, sizeof *rte_xstats);
+
+    /* Retrieve xstats names */
+ rte_xstats_new_len = rte_eth_xstats_get_names(dev->port_id,
+ rte_xstats_names,
+ rte_xstats_len);
+ if (rte_xstats_new_len != rte_xstats_len) {
+ VLOG_WARN("Cannot get XSTATS names for port: %i.", dev->port_id);
+ goto out;
+ }
+    /* Retrieve xstats values */
+ memset(rte_xstats, 0xff, sizeof *rte_xstats * rte_xstats_len);
+ rte_xstats_ret = rte_eth_xstats_get(dev->port_id, rte_xstats,
+ rte_xstats_len);
+ if (rte_xstats_ret > 0 && rte_xstats_ret <= rte_xstats_len) {
+ netdev_dpdk_convert_xstats(stats, rte_xstats, rte_xstats_names,
+ rte_xstats_len);
} else {
- VLOG_WARN("Can't get XSTATS counters for port: %i.", dev->port_id);
+ VLOG_WARN("Cannot get XSTATS values for port: %i.", dev->port_id);
}
+out:
+ free(rte_xstats);
+ free(rte_xstats_names);
+
stats->rx_packets = rte_stats.ipackets;
stats->tx_packets = rte_stats.opackets;
stats->rx_bytes = rte_stats.ibytes;
/* DPDK counts imissed as errors, but count them here as dropped instead */
stats->rx_errors = rte_stats.ierrors - rte_stats.imissed;
stats->tx_errors = rte_stats.oerrors;
- stats->multicast = rte_stats.imcasts;
rte_spinlock_lock(&dev->stats_lock);
stats->tx_dropped = dev->stats.tx_dropped;
+ stats->rx_dropped = dev->stats.rx_dropped;
rte_spinlock_unlock(&dev->stats_lock);
/* These are the available DPDK counters for packets not received due to
* local resource constraints in DPDK and NIC respectively. */
- stats->rx_dropped = rte_stats.rx_nombuf + rte_stats.imissed;
+ stats->rx_dropped += rte_stats.rx_nombuf + rte_stats.imissed;
stats->rx_missed_errors = rte_stats.imissed;
ovs_mutex_unlock(&dev->mutex);
return 0;
}
+static struct ingress_policer *
+netdev_dpdk_policer_construct(uint32_t rate, uint32_t burst)
+{
+ struct ingress_policer *policer = NULL;
+ uint64_t rate_bytes;
+ uint64_t burst_bytes;
+ int err = 0;
+
+ policer = xmalloc(sizeof *policer);
+ rte_spinlock_init(&policer->policer_lock);
+
+ /* rte_meter requires bytes so convert kbits rate and burst to bytes. */
+    rate_bytes = rate * 1000ULL / 8;
+    burst_bytes = burst * 1000ULL / 8;
+
+ policer->app_srtcm_params.cir = rate_bytes;
+ policer->app_srtcm_params.cbs = burst_bytes;
+ policer->app_srtcm_params.ebs = 0;
+ err = rte_meter_srtcm_config(&policer->in_policer,
+ &policer->app_srtcm_params);
+    if (err) {
+        VLOG_ERR("Could not create rte meter for ingress policer");
+        free(policer);
+        return NULL;
+    }
+
+ return policer;
+}
+
+static int
+netdev_dpdk_set_policing(struct netdev* netdev, uint32_t policer_rate,
+ uint32_t policer_burst)
+{
+ struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+ struct ingress_policer *policer;
+
+ /* Force to 0 if no rate specified,
+ * default to 8000 kbits if burst is 0,
+ * else stick with user-specified value.
+ */
+ policer_burst = (!policer_rate ? 0
+ : !policer_burst ? 8000
+ : policer_burst);
+
+ ovs_mutex_lock(&dev->mutex);
+
+ policer = ovsrcu_get_protected(struct ingress_policer *,
+ &dev->ingress_policer);
+
+ if (dev->policer_rate == policer_rate &&
+ dev->policer_burst == policer_burst) {
+ /* Assume that settings haven't changed since we last set them. */
+ ovs_mutex_unlock(&dev->mutex);
+ return 0;
+ }
+
+    /* Destroy any existing ingress policer for the device.  The free is
+     * RCU-deferred because rx threads may still be using the old one. */
+ if (policer) {
+ ovsrcu_postpone(free, policer);
+ }
+
+ if (policer_rate != 0) {
+ policer = netdev_dpdk_policer_construct(policer_rate, policer_burst);
+ } else {
+ policer = NULL;
+ }
+ ovsrcu_set(&dev->ingress_policer, policer);
+ dev->policer_rate = policer_rate;
+ dev->policer_burst = policer_burst;
+ ovs_mutex_unlock(&dev->mutex);
+
+ return 0;
+}
+
static int
netdev_dpdk_get_ifindex(const struct netdev *netdev)
{
netdev_dpdk_vhost_get_carrier(const struct netdev *netdev, bool *carrier)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
- struct virtio_net *virtio_dev = netdev_dpdk_get_virtio(dev);
ovs_mutex_lock(&dev->mutex);
- if (is_vhost_running(virtio_dev)) {
+ if (is_vhost_running(dev)) {
*carrier = 1;
} else {
*carrier = 0;
static int
netdev_dpdk_update_flags__(struct netdev_dpdk *dev,
enum netdev_flags off, enum netdev_flags on,
- enum netdev_flags *old_flagsp) OVS_REQUIRES(dev->mutex)
+ enum netdev_flags *old_flagsp)
+ OVS_REQUIRES(dev->mutex)
{
int err;
if (!(dev->flags & NETDEV_UP)) {
rte_eth_dev_stop(dev->port_id);
}
+ } else {
+        /* If the DPDK_DEV_VHOST device's NETDEV_UP flag was changed and the
+         * vhost device is running, bump the netdev's change_seq to trigger
+         * a link state update. */
+
+ if ((NETDEV_UP & ((*old_flagsp ^ on) | (*old_flagsp ^ off)))
+ && is_vhost_running(dev)) {
+ netdev_change_seq_changed(&dev->up);
+
+ /* Clear statistics if device is getting up. */
+ if (NETDEV_UP & on) {
+ rte_spinlock_lock(&dev->stats_lock);
+ memset(&dev->stats, 0, sizeof(dev->stats));
+ rte_spinlock_unlock(&dev->stats_lock);
+ }
+ }
}
return 0;
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
struct rte_eth_dev_info dev_info;
- if (dev->port_id < 0)
+ if (!rte_eth_dev_is_valid_port(dev->port_id)) {
return ENODEV;
+ }
ovs_mutex_lock(&dev->mutex);
rte_eth_dev_info_get(dev->port_id, &dev_info);
ovs_mutex_unlock(&dev->mutex);
- smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
-
smap_add_format(args, "port_no", "%d", dev->port_id);
smap_add_format(args, "numa_id", "%d", rte_eth_dev_socket_id(dev->port_id));
smap_add_format(args, "driver_name", "%s", dev_info.driver_name);
* Set virtqueue flags so that we do not receive interrupts.
*/
static void
-set_irq_status(struct virtio_net *virtio_dev)
+set_irq_status(int vid)
{
uint32_t i;
uint64_t idx;
- for (i = 0; i < virtio_dev->virt_qp_nb; i++) {
+ for (i = 0; i < rte_vhost_get_queue_num(vid); i++) {
idx = i * VIRTIO_QNUM;
- rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_RXQ, 0);
- rte_vhost_enable_guest_notification(virtio_dev, idx + VIRTIO_TXQ, 0);
+ rte_vhost_enable_guest_notification(vid, idx + VIRTIO_RXQ, 0);
+ rte_vhost_enable_guest_notification(vid, idx + VIRTIO_TXQ, 0);
}
}
/*
* Fixes mapping for vhost-user tx queues. Must be called after each
- * enabling/disabling of queues and real_n_txq modifications.
+ * enabling/disabling of queues and n_txq modifications.
*/
static void
netdev_dpdk_remap_txqs(struct netdev_dpdk *dev)
OVS_REQUIRES(dev->mutex)
{
int *enabled_queues, n_enabled = 0;
- int i, k, total_txqs = dev->real_n_txq;
+ int i, k, total_txqs = dev->up.n_txq;
enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);
rte_free(enabled_queues);
}
-static int
-netdev_dpdk_vhost_set_queues(struct netdev_dpdk *dev, struct virtio_net *virtio_dev)
- OVS_REQUIRES(dev->mutex)
-{
- uint32_t qp_num;
-
- qp_num = virtio_dev->virt_qp_nb;
- if (qp_num > dev->up.n_rxq) {
- VLOG_ERR("vHost Device '%s' %"PRIu64" can't be added - "
- "too many queues %d > %d", virtio_dev->ifname, virtio_dev->device_fh,
- qp_num, dev->up.n_rxq);
- return -1;
- }
-
- dev->real_n_rxq = qp_num;
- dev->real_n_txq = qp_num;
- dev->txq_needs_locking = true;
- /* Enable TX queue 0 by default if it wasn't disabled. */
- if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
- dev->tx_q[0].map = 0;
- }
-
- netdev_dpdk_remap_txqs(dev);
-
- return 0;
-}
-
/*
* A new virtio-net device is added to a vhost port.
*/
static int
-new_device(struct virtio_net *virtio_dev)
+new_device(int vid)
{
struct netdev_dpdk *dev;
bool exists = false;
+ int newnode = 0;
+ char ifname[IF_NAME_SZ];
+
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
ovs_mutex_lock(&dpdk_mutex);
/* Add device to the vhost port with the same name as that passed down. */
LIST_FOR_EACH(dev, list_node, &dpdk_list) {
- if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
+ if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
+ uint32_t qp_num = rte_vhost_get_queue_num(vid);
+
ovs_mutex_lock(&dev->mutex);
- if (netdev_dpdk_vhost_set_queues(dev, virtio_dev)) {
- ovs_mutex_unlock(&dev->mutex);
- ovs_mutex_unlock(&dpdk_mutex);
- return -1;
+ /* Get NUMA information */
+ newnode = rte_vhost_get_numa_node(vid);
+ if (newnode == -1) {
+ VLOG_INFO("Error getting NUMA info for vHost Device '%s'",
+ ifname);
+ newnode = dev->socket_id;
+ }
+
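+            /* Request a reconfiguration if the queue counts or the NUMA
+             * node changed; otherwise the device can be used right away. */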
+ if (dev->requested_n_txq != qp_num
+ || dev->requested_n_rxq != qp_num
+ || dev->requested_socket_id != newnode) {
+ dev->requested_socket_id = newnode;
+ dev->requested_n_rxq = qp_num;
+ dev->requested_n_txq = qp_num;
+ netdev_request_reconfigure(&dev->up);
+ } else {
+ /* Reconfiguration not required. */
+ dev->vhost_reconfigured = true;
}
- ovsrcu_set(&dev->virtio_dev, virtio_dev);
+
+ ovsrcu_index_set(&dev->vid, vid);
exists = true;
- virtio_dev->flags |= VIRTIO_DEV_RUNNING;
+
/* Disable notifications. */
- set_irq_status(virtio_dev);
+ set_irq_status(vid);
+ netdev_change_seq_changed(&dev->up);
ovs_mutex_unlock(&dev->mutex);
break;
}
ovs_mutex_unlock(&dpdk_mutex);
if (!exists) {
- VLOG_INFO("vHost Device '%s' %"PRIu64" can't be added - name not "
- "found", virtio_dev->ifname, virtio_dev->device_fh);
+ VLOG_INFO("vHost Device '%s' can't be added - name not found", ifname);
return -1;
}
- VLOG_INFO("vHost Device '%s' %"PRIu64" has been added", virtio_dev->ifname,
- virtio_dev->device_fh);
+ VLOG_INFO("vHost Device '%s' has been added on numa node %i",
+ ifname, newnode);
+
return 0;
}
{
int i;
- for (i = 0; i < dev->real_n_txq; i++) {
+ for (i = 0; i < dev->up.n_txq; i++) {
dev->tx_q[i].map = OVS_VHOST_QUEUE_MAP_UNKNOWN;
}
}
* the device.
*/
static void
-destroy_device(volatile struct virtio_net *virtio_dev)
+destroy_device(int vid)
{
struct netdev_dpdk *dev;
bool exists = false;
+ char ifname[IF_NAME_SZ];
+
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
ovs_mutex_lock(&dpdk_mutex);
LIST_FOR_EACH (dev, list_node, &dpdk_list) {
- if (netdev_dpdk_get_virtio(dev) == virtio_dev) {
+ if (netdev_dpdk_get_vid(dev) == vid) {
ovs_mutex_lock(&dev->mutex);
- virtio_dev->flags &= ~VIRTIO_DEV_RUNNING;
- ovsrcu_set(&dev->virtio_dev, NULL);
+ dev->vhost_reconfigured = false;
+ ovsrcu_index_set(&dev->vid, -1);
netdev_dpdk_txq_map_clear(dev);
- exists = true;
+
+ netdev_change_seq_changed(&dev->up);
ovs_mutex_unlock(&dev->mutex);
+ exists = true;
break;
}
}
ovs_mutex_unlock(&dpdk_mutex);
- if (exists == true) {
+ if (exists) {
/*
* Wait for other threads to quiesce after setting the 'virtio_dev'
* to NULL, before returning.
* put thread back into quiescent state before returning.
*/
ovsrcu_quiesce_start();
- VLOG_INFO("vHost Device '%s' %"PRIu64" has been removed",
- virtio_dev->ifname, virtio_dev->device_fh);
+ VLOG_INFO("vHost Device '%s' has been removed", ifname);
} else {
- VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
- virtio_dev->device_fh);
+ VLOG_INFO("vHost Device '%s' not found", ifname);
}
}
static int
-vring_state_changed(struct virtio_net *virtio_dev, uint16_t queue_id,
- int enable)
+vring_state_changed(int vid, uint16_t queue_id, int enable)
{
struct netdev_dpdk *dev;
bool exists = false;
int qid = queue_id / VIRTIO_QNUM;
+ char ifname[IF_NAME_SZ];
+
+ rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
if (queue_id % VIRTIO_QNUM == VIRTIO_TXQ) {
return 0;
ovs_mutex_lock(&dpdk_mutex);
LIST_FOR_EACH (dev, list_node, &dpdk_list) {
- if (strncmp(virtio_dev->ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
+ if (strncmp(ifname, dev->vhost_id, IF_NAME_SZ) == 0) {
ovs_mutex_lock(&dev->mutex);
if (enable) {
dev->tx_q[qid].map = qid;
ovs_mutex_unlock(&dpdk_mutex);
if (exists) {
- VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' %"
- PRIu64" changed to \'%s\'", queue_id, qid,
- virtio_dev->ifname, virtio_dev->device_fh,
+        VLOG_INFO("State of queue %d ( tx_qid %d ) of vhost device '%s' "
+                  "changed to \'%s\'", queue_id, qid, ifname,
(enable == 1) ? "enabled" : "disabled");
} else {
- VLOG_INFO("vHost Device '%s' %"PRIu64" not found", virtio_dev->ifname,
- virtio_dev->device_fh);
+ VLOG_INFO("vHost Device '%s' not found", ifname);
return -1;
}
return 0;
}
-struct virtio_net *
-netdev_dpdk_get_virtio(const struct netdev_dpdk *dev)
+int
+netdev_dpdk_get_vid(const struct netdev_dpdk *dev)
+{
+ return ovsrcu_index_get(&dev->vid);
+}
+
+struct ingress_policer *
+netdev_dpdk_get_ingress_policer(const struct netdev_dpdk *dev)
{
- return ovsrcu_get(struct virtio_net *, &dev->virtio_dev);
+ return ovsrcu_get(struct ingress_policer *, &dev->ingress_policer);
}
/*
start_vhost_loop(void *dummy OVS_UNUSED)
{
pthread_detach(pthread_self());
- /* Put the cuse thread into quiescent state. */
+ /* Put the vhost thread into quiescent state. */
ovsrcu_quiesce_start();
rte_vhost_driver_session_start();
return NULL;
return 0;
}
-static int
-dpdk_vhost_cuse_class_init(void)
-{
- return 0;
-}
-
static int
dpdk_vhost_user_class_init(void)
{
}
static int
-dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id) OVS_REQUIRES(dpdk_mutex)
+dpdk_ring_open(const char dev_name[], unsigned int *eth_port_id)
+ OVS_REQUIRES(dpdk_mutex)
{
struct dpdk_ring *ivshmem;
unsigned int port_no;
static int
netdev_dpdk_ring_send(struct netdev *netdev, int qid,
- struct dp_packet **pkts, int cnt, bool may_steal)
+ struct dp_packet_batch *batch, bool may_steal,
+ bool concurrent_txq)
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
unsigned i;
* rss hash field is clear. This is because the same mbuf may be modified by
* the consumer of the ring and return into the datapath without recalculating
* the RSS hash. */
- for (i = 0; i < cnt; i++) {
- dp_packet_rss_invalidate(pkts[i]);
+ for (i = 0; i < batch->count; i++) {
+ dp_packet_rss_invalidate(batch->packets[i]);
}
- netdev_dpdk_send__(dev, qid, pkts, cnt, may_steal);
+ netdev_dpdk_send__(dev, qid, batch, may_steal, concurrent_txq);
return 0;
}
*typep = dev->qos_conf->ops->qos_name;
error = (dev->qos_conf->ops->qos_get
? dev->qos_conf->ops->qos_get(netdev, details): 0);
+ } else {
+ /* No QoS configuration set, return an empty string */
+ *typep = "";
}
ovs_mutex_unlock(&dev->mutex);
/* Install new QoS configuration. */
error = new_ops->qos_construct(netdev, details);
- ovs_assert((error == 0) == (dev->qos_conf != NULL));
}
} else {
error = new_ops->qos_construct(netdev, details);
- ovs_assert((error == 0) == (dev->qos_conf != NULL));
+ }
+
+ ovs_assert((error == 0) == (dev->qos_conf != NULL));
+ if (error) {
+ VLOG_ERR("Failed to set QoS type %s on port %s, returned error: %s",
+ type, netdev->name, rte_strerror(-error));
}
ovs_mutex_unlock(&dev->mutex);
{
struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
struct egress_policer *policer;
- const char *cir_s;
- const char *cbs_s;
int err = 0;
rte_spinlock_lock(&dev->qos_lock);
policer = xmalloc(sizeof *policer);
qos_conf_init(&policer->qos_conf, &egress_policer_ops);
dev->qos_conf = &policer->qos_conf;
- cir_s = smap_get(details, "cir");
- cbs_s = smap_get(details, "cbs");
- policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
- policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
+ policer->app_srtcm_params.cir = smap_get_ullong(details, "cir", 0);
+ policer->app_srtcm_params.cbs = smap_get_ullong(details, "cbs", 0);
policer->app_srtcm_params.ebs = 0;
err = rte_meter_srtcm_config(&policer->egress_meter,
&policer->app_srtcm_params);
+
+ if (err < 0) {
+        /* An error occurred during rte_meter creation; destroy the policer
+         * and set the netdev's qos configuration to NULL. */
+ free(policer);
+ dev->qos_conf = NULL;
+ err = -err;
+ }
rte_spinlock_unlock(&dev->qos_lock);
return err;
1ULL * policer->app_srtcm_params.cir);
smap_add_format(details, "cbs", "%llu",
1ULL * policer->app_srtcm_params.cbs);
+
return 0;
}
egress_policer_qos_set(struct netdev *netdev, const struct smap *details)
{
struct egress_policer *policer;
- const char *cir_s;
- const char *cbs_s;
+ struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
int err = 0;
policer = egress_policer_get__(netdev);
- cir_s = smap_get(details, "cir");
- cbs_s = smap_get(details, "cbs");
- policer->app_srtcm_params.cir = cir_s ? strtoull(cir_s, NULL, 10) : 0;
- policer->app_srtcm_params.cbs = cbs_s ? strtoull(cbs_s, NULL, 10) : 0;
+ rte_spinlock_lock(&dev->qos_lock);
+ policer->app_srtcm_params.cir = smap_get_ullong(details, "cir", 0);
+ policer->app_srtcm_params.cbs = smap_get_ullong(details, "cbs", 0);
policer->app_srtcm_params.ebs = 0;
err = rte_meter_srtcm_config(&policer->egress_meter,
&policer->app_srtcm_params);
- return err;
-}
-
-static inline bool
-egress_policer_pkt_handle__(struct rte_meter_srtcm *meter,
- struct rte_mbuf *pkt, uint64_t time)
-{
- uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);
+ if (err < 0) {
+ /* An error occurred while configuring the rte_meter; destroy the
+ * policer and set the netdev's QoS configuration to NULL. */
+ free(policer);
+ dev->qos_conf = NULL;
+ err = -err;
+ }
+ rte_spinlock_unlock(&dev->qos_lock);
- return rte_meter_srtcm_color_blind_check(meter, time, pkt_len) ==
- e_RTE_METER_GREEN;
+ return err;
}
static int
-egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts,
- int pkt_cnt)
+egress_policer_run(struct netdev *netdev, struct rte_mbuf **pkts, int pkt_cnt)
{
- int i = 0;
int cnt = 0;
struct egress_policer *policer = egress_policer_get__(netdev);
- struct rte_mbuf *pkt = NULL;
- uint64_t current_time = rte_rdtsc();
- for(i = 0; i < pkt_cnt; i++) {
- pkt = pkts[i];
- /* Handle current packet */
- if (egress_policer_pkt_handle__(&policer->egress_meter, pkt,
- current_time)) {
- if (cnt != i) {
- pkts[cnt] = pkt;
- }
- cnt++;
- } else {
- rte_pktmbuf_free(pkt);
- }
- }
+ cnt = netdev_dpdk_policer_run(&policer->egress_meter, pkts, pkt_cnt);
return cnt;
}
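
egress_policer_run() now delegates to the shared netdev_dpdk_policer_run() helper. Its exact body lies outside this diff; a sketch reconstructed from the inline loop deleted above, which keeps packets the srTCM meter colors green, frees the rest, and compacts the array in place:

    static int
    example_policer_run(struct rte_meter_srtcm *meter, struct rte_mbuf **pkts,
                        int pkt_cnt)
    {
        int cnt = 0;
        uint64_t now = rte_rdtsc();

        for (int i = 0; i < pkt_cnt; i++) {
            struct rte_mbuf *pkt = pkts[i];
            uint32_t len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);

            if (rte_meter_srtcm_color_blind_check(meter, now, len)
                == e_RTE_METER_GREEN) {
                pkts[cnt++] = pkt;         /* Compact the survivors. */
            } else {
                rte_pktmbuf_free(pkt);     /* Drop out-of-profile packets. */
            }
        }
        return cnt;
    }
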
egress_policer_run
};
-#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, MULTIQ, SEND, \
- GET_CARRIER, GET_STATS, GET_FEATURES, GET_STATUS, RXQ_RECV) \
+static int
+netdev_dpdk_reconfigure(struct netdev *netdev)
+{
+ struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+ int err = 0;
+
+ ovs_mutex_lock(&dpdk_mutex);
+ ovs_mutex_lock(&dev->mutex);
+
+ if (netdev->n_txq == dev->requested_n_txq
+ && netdev->n_rxq == dev->requested_n_rxq
+ && dev->mtu == dev->requested_mtu) {
+ /* Reconfiguration is unnecessary */
+
+ goto out;
+ }
+
+ rte_eth_dev_stop(dev->port_id);
+
+ if (dev->mtu != dev->requested_mtu) {
+ netdev_dpdk_mempool_configure(dev);
+ }
+
+ netdev->n_txq = dev->requested_n_txq;
+ netdev->n_rxq = dev->requested_n_rxq;
+
+ rte_free(dev->tx_q);
+ err = dpdk_eth_dev_init(dev);
+ netdev_dpdk_alloc_txq(dev, netdev->n_txq);
+
+ netdev_change_seq_changed(netdev);
+
+out:
+
+ ovs_mutex_unlock(&dev->mutex);
+ ovs_mutex_unlock(&dpdk_mutex);
+
+ return err;
+}
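
netdev_dpdk_reconfigure() only applies values staged in the requested_* fields. A hedged sketch of the other half of that handshake, assuming the netdev_request_reconfigure() hook from the netdev reconfiguration API; example_set_mtu() is illustrative, not part of this patch:

    static int
    example_set_mtu(struct netdev *netdev, int mtu)
    {
        struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);

        ovs_mutex_lock(&dev->mutex);
        if (dev->requested_mtu != mtu) {
            dev->requested_mtu = mtu;
            /* Ask the netdev layer to call ->reconfigure() later, when the
             * device can safely be stopped. */
            netdev_request_reconfigure(netdev);
        }
        ovs_mutex_unlock(&dev->mutex);

        return 0;
    }
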
+
+static int
+netdev_dpdk_vhost_user_reconfigure(struct netdev *netdev)
+{
+ struct netdev_dpdk *dev = netdev_dpdk_cast(netdev);
+
+ ovs_mutex_lock(&dpdk_mutex);
+ ovs_mutex_lock(&dev->mutex);
+
+ netdev->n_txq = dev->requested_n_txq;
+ netdev->n_rxq = dev->requested_n_rxq;
+
+ /* Enable TX queue 0 by default if it wasn't disabled. */
+ if (dev->tx_q[0].map == OVS_VHOST_QUEUE_MAP_UNKNOWN) {
+ dev->tx_q[0].map = 0;
+ }
+
+ netdev_dpdk_remap_txqs(dev);
+
+ if (dev->requested_socket_id != dev->socket_id
+ || dev->requested_mtu != dev->mtu) {
+ if (!netdev_dpdk_mempool_configure(dev)) {
+ netdev_change_seq_changed(netdev);
+ }
+ }
+
+ if (netdev_dpdk_get_vid(dev) >= 0) {
+ dev->vhost_reconfigured = true;
+ }
+
+ ovs_mutex_unlock(&dev->mutex);
+ ovs_mutex_unlock(&dpdk_mutex);
+
+ return 0;
+}
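
The tx_q[].map table adjusted above is consumed on the vhost send path. A sketch of that lookup, reconstructed from how the map is used elsewhere in this file (outside the diff): the caller's queue id is folded into the netdev's range and remapped to a queue the guest actually enabled, with negative values meaning no usable queue:

    static int
    example_vhost_pick_txq(struct netdev_dpdk *dev, int qid)
    {
        /* OVS_VHOST_QUEUE_MAP_UNKNOWN and OVS_VHOST_QUEUE_DISABLED are
         * negative, so a negative result tells the caller to drop the
         * burst. */
        return dev->tx_q[qid % dev->up.n_txq].map;
    }
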
+
+#define NETDEV_DPDK_CLASS(NAME, INIT, CONSTRUCT, DESTRUCT, \
+ SET_CONFIG, SET_TX_MULTIQ, SEND, \
+ GET_CARRIER, GET_STATS, \
+ GET_FEATURES, GET_STATUS, \
+ RECONFIGURE, RXQ_RECV) \
{ \
NAME, \
true, /* is_pmd */ \
DESTRUCT, \
netdev_dpdk_dealloc, \
netdev_dpdk_get_config, \
- netdev_dpdk_set_config, \
+ SET_CONFIG, \
NULL, /* get_tunnel_config */ \
NULL, /* build header */ \
NULL, /* push header */ \
NULL, /* pop header */ \
netdev_dpdk_get_numa_id, /* get_numa_id */ \
- MULTIQ, /* set_multiq */ \
+ SET_TX_MULTIQ, \
\
SEND, /* send */ \
NULL, /* send_wait */ \
GET_FEATURES, \
NULL, /* set_advertisements */ \
\
- NULL, /* set_policing */ \
+ netdev_dpdk_set_policing, \
netdev_dpdk_get_qos_types, \
NULL, /* get_qos_capabilities */ \
netdev_dpdk_get_qos, \
NULL, /* arp_lookup */ \
\
netdev_dpdk_update_flags, \
+ RECONFIGURE, \
\
netdev_dpdk_rxq_alloc, \
netdev_dpdk_rxq_construct, \
val = smap_get(ovs_other_config, flag);
- /* Depending on which version of vhost is in use, process the vhost-specific
- * flag if it is provided, otherwise resort to default value.
+ /* Process the vhost-sock-dir flag if it is provided; otherwise fall back
+ * to the default value.
*/
if (val && (strlen(val) <= size)) {
changed = 1;
{
int ret = argc;
char *release_tok = xstrdup(ovs_extra_config);
- char *tok = release_tok, *endptr = NULL;
+ char *tok, *endptr = NULL;
for (tok = strtok_r(release_tok, " ", &endptr); tok != NULL;
tok = strtok_r(NULL, " ", &endptr)) {
int result;
int argc, argc_tmp;
bool auto_determine = true;
- int err;
+ int err = 0;
cpu_set_t cpuset;
-#ifndef VHOST_CUSE
char *sock_dir_subcomponent;
-#endif
if (!smap_get_bool(ovs_other_config, "dpdk-init", false)) {
VLOG_INFO("DPDK Disabled - to change this requires a restart.\n");
}
VLOG_INFO("DPDK Enabled, initializing");
-
-#ifdef VHOST_CUSE
- if (process_vhost_flags("cuse-dev-name", xstrdup("vhost-net"),
- PATH_MAX, ovs_other_config, &cuse_dev_name)) {
-#else
- if (process_vhost_flags("vhost-sock-dir", xstrdup(""),
+ if (process_vhost_flags("vhost-sock-dir", xstrdup(ovs_rundir()),
NAME_MAX, ovs_other_config,
&sock_dir_subcomponent)) {
struct stat s;
}
free(sock_dir_subcomponent);
} else {
- vhost_sock_dir = xstrdup(ovs_rundir());
- free(sock_dir_subcomponent);
-#endif
- }
-
- /* Get the main thread affinity */
- CPU_ZERO(&cpuset);
- err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
- &cpuset);
- if (err) {
- VLOG_ERR("Thread getaffinity error %d.", err);
+ vhost_sock_dir = sock_dir_subcomponent;
}
argv = grow_argv(&argv, 0, 1);
*/
if (auto_determine) {
int i;
- for (i = 0; i < CPU_SETSIZE; i++) {
- if (CPU_ISSET(i, &cpuset)) {
- argv = grow_argv(&argv, argc, 2);
- argv[argc++] = xstrdup("-c");
- argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
- i = CPU_SETSIZE;
+ /* Get the main thread affinity */
+ CPU_ZERO(&cpuset);
+ err = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+ &cpuset);
+ if (!err) {
+ for (i = 0; i < CPU_SETSIZE; i++) {
+ if (CPU_ISSET(i, &cpuset)) {
+ argv = grow_argv(&argv, argc, 2);
+ argv[argc++] = xstrdup("-c");
+ argv[argc++] = xasprintf("0x%08llX", (1ULL<<i));
+ i = CPU_SETSIZE;
+ }
}
+ } else {
+ VLOG_ERR("Thread getaffinity error %d. Using core 0x1", err);
+ /* The user did not set dpdk-lcore-mask and the current thread
+ * affinity could not be read; default to core 0x1. */
+ argv = grow_argv(&argv, argc, 2);
+ argv[argc++] = xstrdup("-c");
+ argv[argc++] = xasprintf("0x%X", 1);
}
}
}
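
A worked sketch of the mask construction above: the first CPU set in the inherited affinity becomes a one-hot EAL coremask (e.g. CPU 5 yields 0x20), with core 0x1 as the fallback when no affinity is available. example_first_core_mask() is illustrative only:

    static unsigned long long
    example_first_core_mask(const cpu_set_t *set)
    {
        for (int i = 0; i < CPU_SETSIZE; i++) {
            if (CPU_ISSET(i, set)) {
                return 1ULL << i;   /* e.g. CPU 5 -> 0x20. */
            }
        }
        return 0x1;                 /* Fall back to core 0. */
    }
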
/* Set the main thread affinity back to pre rte_eal_init() value */
- if (!auto_determine) {
+ if (auto_determine && !err) {
err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
&cpuset);
if (err) {
ovs_thread_create("dpdk_watchdog", dpdk_watchdog, NULL);
-#ifdef VHOST_CUSE
- /* Register CUSE device to handle IOCTLs.
- * Unless otherwise specified, cuse_dev_name is set to vhost-net.
- */
- err = rte_vhost_driver_register(cuse_dev_name);
+ dpdk_vhost_class_init();
- if (err != 0) {
- VLOG_ERR("CUSE device setup failure.");
- return;
+#ifdef DPDK_PDUMP
+ VLOG_INFO("DPDK pdump packet capture enabled");
+ err = rte_pdump_init(ovs_rundir());
+ if (err) {
+ VLOG_INFO("Error initialising DPDK pdump");
+ rte_pdump_uninit();
+ } else {
+ char *server_socket_path;
+
+ server_socket_path = xasprintf("%s/%s", ovs_rundir(),
+ "pdump_server_socket");
+ fatal_signal_add_file_to_unlink(server_socket_path);
+ free(server_socket_path);
}
#endif
- dpdk_vhost_class_init();
-
/* Finally, register the dpdk classes */
netdev_dpdk_register();
}
NULL,
netdev_dpdk_construct,
netdev_dpdk_destruct,
- netdev_dpdk_set_multiq,
+ netdev_dpdk_set_config,
+ netdev_dpdk_set_tx_multiq,
netdev_dpdk_eth_send,
netdev_dpdk_get_carrier,
netdev_dpdk_get_stats,
netdev_dpdk_get_features,
netdev_dpdk_get_status,
+ netdev_dpdk_reconfigure,
netdev_dpdk_rxq_recv);
static const struct netdev_class dpdk_ring_class =
NULL,
netdev_dpdk_ring_construct,
netdev_dpdk_destruct,
- netdev_dpdk_set_multiq,
+ netdev_dpdk_ring_set_config,
+ netdev_dpdk_set_tx_multiq,
netdev_dpdk_ring_send,
netdev_dpdk_get_carrier,
netdev_dpdk_get_stats,
netdev_dpdk_get_features,
netdev_dpdk_get_status,
+ netdev_dpdk_reconfigure,
netdev_dpdk_rxq_recv);
-static const struct netdev_class OVS_UNUSED dpdk_vhost_cuse_class =
- NETDEV_DPDK_CLASS(
- "dpdkvhostcuse",
- dpdk_vhost_cuse_class_init,
- netdev_dpdk_vhost_cuse_construct,
- netdev_dpdk_vhost_destruct,
- netdev_dpdk_vhost_cuse_set_multiq,
- netdev_dpdk_vhost_send,
- netdev_dpdk_vhost_get_carrier,
- netdev_dpdk_vhost_get_stats,
- NULL,
- NULL,
- netdev_dpdk_vhost_rxq_recv);
-
-static const struct netdev_class OVS_UNUSED dpdk_vhost_user_class =
+static const struct netdev_class dpdk_vhost_user_class =
NETDEV_DPDK_CLASS(
"dpdkvhostuser",
dpdk_vhost_user_class_init,
netdev_dpdk_vhost_user_construct,
netdev_dpdk_vhost_destruct,
- netdev_dpdk_vhost_set_multiq,
+ NULL,
+ NULL,
netdev_dpdk_vhost_send,
netdev_dpdk_vhost_get_carrier,
netdev_dpdk_vhost_get_stats,
NULL,
NULL,
+ netdev_dpdk_vhost_user_reconfigure,
netdev_dpdk_vhost_rxq_recv);
void
dpdk_common_init();
netdev_register_provider(&dpdk_class);
netdev_register_provider(&dpdk_ring_class);
-#ifdef VHOST_CUSE
- netdev_register_provider(&dpdk_vhost_cuse_class);
-#else
netdev_register_provider(&dpdk_vhost_user_class);
-#endif
}
-int
-pmd_thread_setaffinity_cpu(unsigned cpu)
+void
+dpdk_set_lcore_id(unsigned cpu)
{
- cpu_set_t cpuset;
- int err;
-
- CPU_ZERO(&cpuset);
- CPU_SET(cpu, &cpuset);
- err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
- if (err) {
- VLOG_ERR("Thread affinity error %d",err);
- return err;
- }
/* NON_PMD_CORE_ID is reserved for use by non pmd threads. */
ovs_assert(cpu != NON_PMD_CORE_ID);
RTE_PER_LCORE(_lcore_id) = cpu;
-
- return 0;
}
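
A hedged usage sketch of dpdk_set_lcore_id(): a pmd thread, already pinned to its core by the OVS thread layer, registers that core with DPDK so rte_lcore_id() (and therefore per-lcore mempool caches) resolve correctly. example_pmd_thread_main() is illustrative:

    static void *
    example_pmd_thread_main(void *arg)
    {
        unsigned core_id = *(unsigned *) arg;

        dpdk_set_lcore_id(core_id);
        ovs_assert(rte_lcore_id() == core_id);
        /* ... poll rx queues ... */
        return NULL;
    }
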
static bool