net/mlx5e: QoS, Fix wrongfully setting parent_element_id on MODIFY_SCHEDULING_ELEMENT
author		Maor Dickman <maord@nvidia.com>
		Tue, 27 Dec 2022 08:51:38 +0000 (10:51 +0200)
committer	Saeed Mahameed <saeedm@nvidia.com>
		Wed, 18 Jan 2023 08:01:39 +0000 (00:01 -0800)
According to the HW spec, the parent_element_id field should be reserved (0x0) when
calling the MODIFY_SCHEDULING_ELEMENT command.

This patch removes the wrong initialization of the reserved field, parent_element_id,
in mlx5_qos_update_node.

Fixes: 214baf22870c ("net/mlx5e: Support HTB offload")
Signed-off-by: Maor Dickman <maord@nvidia.com>
Reviewed-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
drivers/net/ethernet/mellanox/mlx5/core/qos.c
drivers/net/ethernet/mellanox/mlx5/core/qos.h
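
For reference, this is roughly how mlx5_qos_update_node ends up after the change: the
qos.c hunk below only shows the top of the function, so the trailing bitmask setup and
the mlx5_modify_scheduling_element_cmd call are sketched from the surrounding driver
code rather than taken from this diff.

int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
			 u32 bw_share, u32 max_avg_bw, u32 id)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	u32 bitmask = 0;

	/* parent_element_id is left at 0x0: the field is reserved for
	 * MODIFY_SCHEDULING_ELEMENT and must not be set.
	 */
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);

	/* Only the rate-related fields are flagged as modified. */
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;

	return mlx5_modify_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
						  sched_ctx, id, bitmask);
}

Since the modify bitmask only covers bw_share and max_average_bw, dropping the
parent_element_id assignment keeps that field at its zero-initialized (reserved)
value, as the HW spec requires.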

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
index 6dac76fa58a3f158fdd5fc08cddeb94685c160c7..09d441ecb9f6d204e236f7e340e174a0842349bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
@@ -637,7 +637,7 @@ mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
                if (child->bw_share == old_bw_share)
                        continue;
 
-               err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
+               err_one = mlx5_qos_update_node(htb->mdev, child->bw_share,
                                               child->max_average_bw, child->hw_id);
                if (!err && err_one) {
                        err = err_one;
@@ -671,7 +671,7 @@ mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
        mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
        mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
 
-       err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
+       err = mlx5_qos_update_node(htb->mdev, bw_share,
                                   max_average_bw, node->hw_id);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
index 0777be24a3074a7a078b091cb30b93d4d33df9e1..8bce730b5c5befdc4e553810eae0ab1400fe836f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
@@ -62,13 +62,12 @@ int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
        return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
 }
 
-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
                         u32 bw_share, u32 max_avg_bw, u32 id)
 {
        u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
        u32 bitmask = 0;
 
-       MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
        MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
        MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
index 125e4e47e6f71f2ecf31a960d5eca467ab8a5b92..624ce822b7f596e1c6ca1ce04b0bf0606e894abc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
@@ -23,7 +23,7 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
 int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
                               u32 bw_share, u32 max_avg_bw, u32 *id);
 int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 bw_share,
                         u32 max_avg_bw, u32 id);
 int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);