git.proxmox.com Git - ceph.git/blobdiff - ceph/src/spdk/dpdk/drivers/net/netvsc/hn_vf.c
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / dpdk / drivers / net / netvsc / hn_vf.c
index b980bb8a4d8a1d48a663d3b558160cbf34a99132..b7e3ba46bf63f4007af39a27a34faf04561326e5 100644 (file)
 /* Search for VF with matching MAC address, return port id */
 static int hn_vf_match(const struct rte_eth_dev *dev)
 {
-       const struct ether_addr *mac = dev->data->mac_addrs;
+       const struct rte_ether_addr *mac = dev->data->mac_addrs;
        int i;
 
        RTE_ETH_FOREACH_DEV(i) {
                const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
-               const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;
+               const struct rte_ether_addr *vf_mac = vf_dev->data->mac_addrs;
 
                if (vf_dev == dev)
                        continue;
 
-               if (is_same_ether_addr(mac, vf_mac))
+               if (rte_is_same_ether_addr(mac, vf_mac))
                        return i;
        }
        return -ENOENT;
@@ -82,8 +82,6 @@ static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
 
        PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
        hv->vf_port = port_id;
-       rte_smp_wmb();
-
        return 0;
 }
 
@@ -98,9 +96,7 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
                return port;
        }
 
-       rte_spinlock_lock(&hv->vf_lock);
        err = hn_vf_attach(hv, port);
-
        if (err == 0) {
                dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
                hv->vf_intr = (struct rte_intr_handle) {
@@ -110,7 +106,6 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
                dev->intr_handle = &hv->vf_intr;
                hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
        }
-       rte_spinlock_unlock(&hv->vf_lock);
 
        return err;
 }
@@ -119,8 +114,6 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
 static void hn_vf_remove(struct hn_data *hv)
 {
 
-       rte_spinlock_lock(&hv->vf_lock);
-
        if (!hn_vf_attached(hv)) {
                PMD_DRV_LOG(ERR, "VF path not active");
        } else {
@@ -129,12 +122,10 @@ static void hn_vf_remove(struct hn_data *hv)
 
                /* Stop transmission over VF */
                hv->vf_port = HN_INVALID_PORT;
-               rte_smp_wmb();
 
                /* Give back ownership */
                rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);
        }
-       rte_spinlock_unlock(&hv->vf_lock);
 }
 
 /* Handle VF association message from host */
@@ -156,15 +147,27 @@ hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
                    vf_assoc->allocated ? "add to" : "remove from",
                    dev->data->port_id);
 
+       rte_rwlock_write_lock(&hv->vf_lock);
        hv->vf_present = vf_assoc->allocated;
 
-       if (dev->state != RTE_ETH_DEV_ATTACHED)
-               return;
+       if (dev->state == RTE_ETH_DEV_ATTACHED) {
+               if (vf_assoc->allocated)
+                       hn_vf_add(dev, hv);
+               else
+                       hn_vf_remove(hv);
+       }
+       rte_rwlock_write_unlock(&hv->vf_lock);
+}
 
-       if (vf_assoc->allocated)
-               hn_vf_add(dev, hv);
-       else
-               hn_vf_remove(hv);
+static void
+hn_vf_merge_desc_lim(struct rte_eth_desc_lim *lim,
+                    const struct rte_eth_desc_lim *vf_lim)
+{
+       lim->nb_max = RTE_MIN(vf_lim->nb_max, lim->nb_max);
+       lim->nb_min = RTE_MAX(vf_lim->nb_min, lim->nb_min);
+       lim->nb_align = RTE_MAX(vf_lim->nb_align, lim->nb_align);
+       lim->nb_seg_max = RTE_MIN(vf_lim->nb_seg_max, lim->nb_seg_max);
+       lim->nb_mtu_seg_max = RTE_MIN(vf_lim->nb_mtu_seg_max, lim->nb_mtu_seg_max);
 }
 
 /*
@@ -172,12 +175,15 @@ hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
  * use the default config of the VF
  * and the minimum number of queues and buffer sizes.
  */
-static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
+static int hn_vf_info_merge(struct rte_eth_dev *vf_dev,
                             struct rte_eth_dev_info *info)
 {
        struct rte_eth_dev_info vf_info;
+       int ret;
 
-       rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+       ret = rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);
+       if (ret != 0)
+               return ret;
 
        info->speed_capa = vf_info.speed_capa;
        info->default_rxportconf = vf_info.default_rxportconf;
@@ -193,22 +199,28 @@ static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
                                      info->max_tx_queues);
        info->tx_offload_capa &= vf_info.tx_offload_capa;
        info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;
+       hn_vf_merge_desc_lim(&info->tx_desc_lim, &vf_info.tx_desc_lim);
 
        info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
                                       info->min_rx_bufsize);
        info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
                                       info->max_rx_pktlen);
+       hn_vf_merge_desc_lim(&info->rx_desc_lim, &vf_info.rx_desc_lim);
+
+       return 0;
 }
 
-void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
+int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
 {
        struct rte_eth_dev *vf_dev;
+       int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
-               hn_vf_info_merge(vf_dev, info);
-       rte_spinlock_unlock(&hv->vf_lock);
+               ret = hn_vf_info_merge(vf_dev, info);
+       rte_rwlock_read_unlock(&hv->vf_lock);
+       return ret;
 }
 
 int hn_vf_link_update(struct rte_eth_dev *dev,
@@ -218,11 +230,11 @@ int hn_vf_link_update(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->link_update)
                ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 
        return ret;
 }
@@ -295,10 +307,10 @@ int hn_vf_configure(struct rte_eth_dev *dev,
        struct hn_data *hv = dev->data->dev_private;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        if (hv->vf_port != HN_INVALID_PORT)
                ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -308,11 +320,11 @@ const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
        struct rte_eth_dev *vf_dev;
        const uint32_t *ptypes = NULL;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
                ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 
        return ptypes;
 }
@@ -323,11 +335,11 @@ int hn_vf_start(struct rte_eth_dev *dev)
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_start(vf_dev->data->port_id);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -336,11 +348,11 @@ void hn_vf_stop(struct rte_eth_dev *dev)
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                rte_eth_dev_stop(vf_dev->data->port_id);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 /* If VF is present, then cascade configuration down */
@@ -348,11 +360,25 @@ void hn_vf_stop(struct rte_eth_dev *dev)
        {                                                       \
                struct hn_data *hv = (dev)->data->dev_private;  \
                struct rte_eth_dev *vf_dev;                     \
-               rte_spinlock_lock(&hv->vf_lock);                \
+               rte_rwlock_read_lock(&hv->vf_lock);             \
                vf_dev = hn_get_vf_dev(hv);                     \
                if (vf_dev)                                     \
                        func(vf_dev->data->port_id);            \
-               rte_spinlock_unlock(&hv->vf_lock);              \
+               rte_rwlock_read_unlock(&hv->vf_lock);           \
+       }
+
+/* If VF is present, then cascade configuration down */
+#define VF_ETHDEV_FUNC_RET_STATUS(dev, func)                   \
+       {                                                       \
+               struct hn_data *hv = (dev)->data->dev_private;  \
+               struct rte_eth_dev *vf_dev;                     \
+               int ret = 0;                                    \
+               rte_rwlock_read_lock(&hv->vf_lock);             \
+               vf_dev = hn_get_vf_dev(hv);                     \
+               if (vf_dev)                                     \
+                       ret = func(vf_dev->data->port_id);      \
+               rte_rwlock_read_unlock(&hv->vf_lock);           \
+               return ret;                                     \
        }
 
 void hn_vf_reset(struct rte_eth_dev *dev)
@@ -365,54 +391,54 @@ void hn_vf_close(struct rte_eth_dev *dev)
        struct hn_data *hv = dev->data->dev_private;
        uint16_t vf_port;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_port = hv->vf_port;
        if (vf_port != HN_INVALID_PORT)
                rte_eth_dev_close(vf_port);
 
        hv->vf_port = HN_INVALID_PORT;
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
-void hn_vf_stats_reset(struct rte_eth_dev *dev)
+int hn_vf_stats_reset(struct rte_eth_dev *dev)
 {
-       VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
+       VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_stats_reset);
 }
 
-void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
+int hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
 {
-       VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
+       VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_enable);
 }
 
-void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
+int hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
 {
-       VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
+       VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_allmulticast_disable);
 }
 
-void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
+int hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
 {
-       VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
+       VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_enable);
 }
 
-void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
+int hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
 {
-       VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
+       VF_ETHDEV_FUNC_RET_STATUS(dev, rte_eth_promiscuous_disable);
 }
 
 int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
-                       struct ether_addr *mc_addr_set,
+                       struct rte_ether_addr *mc_addr_set,
                        uint32_t nb_mc_addr)
 {
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
                                                   mc_addr_set, nb_mc_addr);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -425,13 +451,13 @@ int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, tx_conf);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -439,7 +465,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
 {
        struct rte_eth_dev *vf_dev;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
                void *subq = vf_dev->data->tx_queues[queue_id];
@@ -447,7 +473,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
                (*vf_dev->dev_ops->tx_queue_release)(subq);
        }
 
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
@@ -460,13 +486,13 @@ int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, rx_conf, mp);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -474,14 +500,14 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
 {
        struct rte_eth_dev *vf_dev;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
                void *subq = vf_dev->data->rx_queues[queue_id];
 
                (*vf_dev->dev_ops->rx_queue_release)(subq);
        }
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_stats_get(struct rte_eth_dev *dev,
@@ -491,11 +517,11 @@ int hn_vf_stats_get(struct rte_eth_dev *dev,
        struct rte_eth_dev *vf_dev;
        int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
-       rte_spinlock_unlock(&hv->vf_lock);
+       rte_rwlock_read_unlock(&hv->vf_lock);
        return ret;
 }
 
@@ -506,17 +532,19 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int i, count = 0;
-       char tmp[RTE_ETH_XSTATS_NAME_SIZE];
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
-       if (vf_dev && vf_dev->dev_ops->xstats_get_names)
-               count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
-       rte_spinlock_unlock(&hv->vf_lock);
+       if (vf_dev)
+               count = rte_eth_xstats_get_names(vf_dev->data->port_id,
+                                                names, n);
+       rte_rwlock_read_unlock(&hv->vf_lock);
 
        /* add vf_ prefix to xstat names */
        if (names) {
                for (i = 0; i < count; i++) {
+                       char tmp[RTE_ETH_XSTATS_NAME_SIZE];
+
                        snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
                        strlcpy(names[i].name, tmp, sizeof(names[i].name));
                }
@@ -527,29 +555,76 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
 
 int hn_vf_xstats_get(struct rte_eth_dev *dev,
                     struct rte_eth_xstat *xstats,
+                    unsigned int offset,
                     unsigned int n)
 {
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
-       int count = 0;
+       int i, count = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
-       if (vf_dev && vf_dev->dev_ops->xstats_get)
-               count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
-       rte_spinlock_unlock(&hv->vf_lock);
+       if (vf_dev)
+               count = rte_eth_xstats_get(vf_dev->data->port_id,
+                                          xstats + offset, n - offset);
+       rte_rwlock_read_unlock(&hv->vf_lock);
+
+       /* Offset id's for VF stats */
+       if (count > 0) {
+               for (i = 0; i < count; i++)
+                       xstats[i + offset].id += offset;
+       }
 
        return count;
 }
 
-void hn_vf_xstats_reset(struct rte_eth_dev *dev)
+int hn_vf_xstats_reset(struct rte_eth_dev *dev)
+{
+       struct hn_data *hv = dev->data->dev_private;
+       struct rte_eth_dev *vf_dev;
+       int ret;
+
+       rte_rwlock_read_lock(&hv->vf_lock);
+       vf_dev = hn_get_vf_dev(hv);
+       if (vf_dev)
+               ret = rte_eth_xstats_reset(vf_dev->data->port_id);
+       else
+               ret = -EINVAL;
+       rte_rwlock_read_unlock(&hv->vf_lock);
+
+       return ret;
+}
+
+int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
+                         struct rte_eth_rss_conf *rss_conf)
 {
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
+       int ret = 0;
 
-       rte_spinlock_lock(&hv->vf_lock);
+       rte_rwlock_read_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
-       if (vf_dev && vf_dev->dev_ops->xstats_reset)
-               vf_dev->dev_ops->xstats_reset(vf_dev);
-       rte_spinlock_unlock(&hv->vf_lock);
+       if (vf_dev && vf_dev->dev_ops->rss_hash_update)
+               ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
+       rte_rwlock_read_unlock(&hv->vf_lock);
+
+       return ret;
+}
+
+int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
+                          struct rte_eth_rss_reta_entry64 *reta_conf,
+                          uint16_t reta_size)
+{
+       struct hn_data *hv = dev->data->dev_private;
+       struct rte_eth_dev *vf_dev;
+       int ret = 0;
+
+       rte_rwlock_read_lock(&hv->vf_lock);
+       vf_dev = hn_get_vf_dev(hv);
+       if (vf_dev && vf_dev->dev_ops->reta_update)
+               ret = vf_dev->dev_ops->reta_update(vf_dev,
+                                                  reta_conf, reta_size);
+       rte_rwlock_read_unlock(&hv->vf_lock);
+
+       return ret;
 }