net/mlx5e: Enable adding peer miss rules only if merged eswitch is supported
author     Maor Dickman <maord@mellanox.com>
           Wed, 5 Aug 2020 14:56:04 +0000 (17:56 +0300)
committer  Saeed Mahameed <saeedm@nvidia.com>
           Tue, 22 Sep 2020 00:22:22 +0000 (17:22 -0700)
The cited commit creates the peer miss group during switchdev mode
initialization in order to handle miss packets correctly while in VF
LAG mode. This is done regardless of FW support for such groups, which
can cause rule setup failures later on.

Fix by adding a FW capability check before creating the peer miss group/rule.

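In condensed form, the fix wraps both the creation and the teardown of the
peer miss group in the same capability check. The following is a simplified
sketch of the pattern applied in the diff below (not a drop-in replacement;
group setup details and the unwind of the other groups are omitted):

    /* Create the peer miss group only when FW reports merged eswitch
     * support; mirror the same check on the cleanup paths.
     */
    if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
            /* ... populate flow_group_in as before ... */
            g = mlx5_create_flow_group(fdb, flow_group_in);
            if (IS_ERR(g)) {
                    err = PTR_ERR(g);
                    goto peer_miss_err;
            }
            esw->fdb_table.offloads.peer_miss_grp = g;
    }

    /* cleanup / error unwind */
    if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
            mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
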
Fixes: ac004b832128 ("net/mlx5e: E-Switch, Add peer miss rules")
Signed-off-by: Maor Dickman <maord@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Reviewed-by: Raed Salem <raeds@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

index d2516922d8678b1678a454b71e9487f5ac607e6b..1bcf2609dca86962f628009478e60b7c4d7b374e 100644
@@ -1219,35 +1219,37 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;
 
-       /* create peer esw miss group */
-       memset(flow_group_in, 0, inlen);
+       if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+               /* create peer esw miss group */
+               memset(flow_group_in, 0, inlen);
 
-       esw_set_flow_group_source_port(esw, flow_group_in);
+               esw_set_flow_group_source_port(esw, flow_group_in);
 
-       if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
-               match_criteria = MLX5_ADDR_OF(create_flow_group_in,
-                                             flow_group_in,
-                                             match_criteria);
+               if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+                       match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+                                                     flow_group_in,
+                                                     match_criteria);
 
-               MLX5_SET_TO_ONES(fte_match_param, match_criteria,
-                                misc_parameters.source_eswitch_owner_vhca_id);
+                       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                                        misc_parameters.source_eswitch_owner_vhca_id);
 
-               MLX5_SET(create_flow_group_in, flow_group_in,
-                        source_eswitch_owner_vhca_id_valid, 1);
-       }
+                       MLX5_SET(create_flow_group_in, flow_group_in,
+                                source_eswitch_owner_vhca_id_valid, 1);
+               }
 
-       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
-       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
-                ix + esw->total_vports - 1);
-       ix += esw->total_vports;
+               MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+               MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+                        ix + esw->total_vports - 1);
+               ix += esw->total_vports;
 
-       g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR(g)) {
-               err = PTR_ERR(g);
-               esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
-               goto peer_miss_err;
+               g = mlx5_create_flow_group(fdb, flow_group_in);
+               if (IS_ERR(g)) {
+                       err = PTR_ERR(g);
+                       esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
+                       goto peer_miss_err;
+               }
+               esw->fdb_table.offloads.peer_miss_grp = g;
        }
-       esw->fdb_table.offloads.peer_miss_grp = g;
 
        /* create miss group */
        memset(flow_group_in, 0, inlen);
@@ -1281,7 +1283,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
 miss_rule_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 miss_err:
-       mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+       if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+               mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
 peer_miss_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
 send_vport_err:
@@ -1305,7 +1308,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
-       mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+       if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+               mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 
        mlx5_esw_chains_destroy(esw);