]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/commitdiff
net/mlx5: Adjustments for the activate LAG logic to run under sriov
authorRabie Loulou <rabiel@mellanox.com>
Thu, 26 Apr 2018 13:45:41 +0000 (16:45 +0300)
committerSaeed Mahameed <saeedm@mellanox.com>
Fri, 14 Dec 2018 21:28:53 +0000 (13:28 -0800)
When HW LAG is set or unset, RoCE must not be enabled on the port; therefore
we wrap such changes with RoCE enable/disable, either directly or through
re-creation of the IB device.

Currently, lag and sriov are mutually exclusive, so by definition this
code doesn't run under sriov.

Towards changing this exclusion, we need to make sure that RoCE will not
be enabled on the eswitch manager port under SR-IOV, since this is a
requirement of switchdev mode.

We are being strict here and avoiding this altogether under SR-IOV.

Signed-off-by: Rabie Loulou <rabiel@mellanox.com>
Reviewed-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
drivers/net/ethernet/mellanox/mlx5/core/lag.c

index 8c5c5e418d613d5508d7e148125cb4e3bcc3c5c6..8127d907e1efe3a652a8f62c23638ce72b42e786 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/vport.h>
 #include "mlx5_core.h"
+#include "eswitch.h"
 
 enum {
        MLX5_LAG_FLAG_BONDED = 1 << 0,
@@ -257,13 +258,15 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 {
        struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
        struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
+       bool do_bond, sriov_enabled;
        struct lag_tracker tracker;
        int i;
-       bool do_bond;
 
        if (!dev0 || !dev1)
                return;
 
+       sriov_enabled = mlx5_sriov_is_enabled(dev0) || mlx5_sriov_is_enabled(dev1);
+
        mutex_lock(&lag_mutex);
        tracker = ldev->tracker;
        mutex_unlock(&lag_mutex);
@@ -271,26 +274,32 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
        do_bond = tracker.is_bonded && ldev->allowed;
 
        if (do_bond && !mlx5_lag_is_bonded(ldev)) {
-               for (i = 0; i < MLX5_MAX_PORTS; i++)
-                       mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
-                                                   MLX5_INTERFACE_PROTOCOL_IB);
+               if (!sriov_enabled)
+                       for (i = 0; i < MLX5_MAX_PORTS; i++)
+                               mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
+                                                           MLX5_INTERFACE_PROTOCOL_IB);
 
                mlx5_activate_lag(ldev, &tracker);
 
-               mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
-               mlx5_nic_vport_enable_roce(dev1);
+               if (!sriov_enabled) {
+                       mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+                       mlx5_nic_vport_enable_roce(dev1);
+               }
        } else if (do_bond && mlx5_lag_is_bonded(ldev)) {
                mlx5_modify_lag(ldev, &tracker);
        } else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
-               mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
-               mlx5_nic_vport_disable_roce(dev1);
+               if (!sriov_enabled) {
+                       mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+                       mlx5_nic_vport_disable_roce(dev1);
+               }
 
                mlx5_deactivate_lag(ldev);
 
-               for (i = 0; i < MLX5_MAX_PORTS; i++)
-                       if (ldev->pf[i].dev)
-                               mlx5_add_dev_by_protocol(ldev->pf[i].dev,
-                                                        MLX5_INTERFACE_PROTOCOL_IB);
+               if (!sriov_enabled)
+                       for (i = 0; i < MLX5_MAX_PORTS; i++)
+                               if (ldev->pf[i].dev)
+                                       mlx5_add_dev_by_protocol(ldev->pf[i].dev,
+                                                                MLX5_INTERFACE_PROTOCOL_IB);
        }
 }