git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
mlxsw: spectrum_qdisc: Offload root TBF as port shaper
author: Petr Machata <petrm@nvidia.com>
Wed, 27 Oct 2021 15:19:59 +0000 (18:19 +0300)
committer: Jakub Kicinski <kuba@kernel.org>
Fri, 29 Oct 2021 02:47:49 +0000 (19:47 -0700)
The Spectrum ASIC allows configuration of maximum shaper on all levels of
the scheduling hierarchy: TCs, subgroups, groups and also ports. Currently,
TBF always configures a subgroup. But a user could reasonably express the
intent to configure port shaper by putting TBF to a root position, around
ETS / PRIO. Accept this usage and offload appropriately.

Signed-off-by: Petr Machata <petrm@nvidia.com>
Signed-off-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c

index ddb5ad88b35071b4f6422555e98cad0cbf2344a4..4243d3b883ff1854dbb112132b23c4cb39e8d6e2 100644 (file)
@@ -271,6 +271,7 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
 
 struct mlxsw_sp_qdisc_tree_validate {
        bool forbid_ets;
+       bool forbid_root_tbf;
        bool forbid_tbf;
        bool forbid_red;
 };
@@ -310,18 +311,26 @@ __mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                if (validate.forbid_red)
                        return -EINVAL;
                validate.forbid_red = true;
+               validate.forbid_root_tbf = true;
                validate.forbid_ets = true;
                break;
        case MLXSW_SP_QDISC_TBF:
-               if (validate.forbid_tbf)
-                       return -EINVAL;
-               validate.forbid_tbf = true;
-               validate.forbid_ets = true;
+               if (validate.forbid_root_tbf) {
+                       if (validate.forbid_tbf)
+                               return -EINVAL;
+                       /* This is a TC TBF. */
+                       validate.forbid_tbf = true;
+                       validate.forbid_ets = true;
+               } else {
+                       /* This is root TBF. */
+                       validate.forbid_root_tbf = true;
+               }
                break;
        case MLXSW_SP_QDISC_PRIO:
        case MLXSW_SP_QDISC_ETS:
                if (validate.forbid_ets)
                        return -EINVAL;
+               validate.forbid_root_tbf = true;
                validate.forbid_ets = true;
                break;
        default:
@@ -905,16 +914,34 @@ mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
        mlxsw_sp_qdisc->stats_base.backlog = 0;
 }
 
+static enum mlxsw_reg_qeec_hr
+mlxsw_sp_qdisc_tbf_hr(struct mlxsw_sp_port *mlxsw_sp_port,
+                     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+       if (mlxsw_sp_qdisc == &mlxsw_sp_port->qdisc->root_qdisc)
+               return MLXSW_REG_QEEC_HR_PORT;
+
+       /* Configure subgroup shaper, so that both UC and MC traffic is subject
+        * to shaping. That is unlike RED, however UC queue lengths are going to
+        * be different than MC ones due to different pool and quota
+        * configurations, so the configuration is not applicable. For shaper on
+        * the other hand, subjecting the overall stream to the configured
+        * shaper makes sense. Also note that that is what we do for
+        * ieee_setmaxrate().
+        */
+       return MLXSW_REG_QEEC_HR_SUBGROUP;
+}
+
 static int
 mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
 {
+       enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
+                                                         mlxsw_sp_qdisc);
        int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
                                                       mlxsw_sp_qdisc);
 
-       return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
-                                            MLXSW_REG_QEEC_HR_SUBGROUP,
-                                            tclass_num, 0,
+       return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
                                             MLXSW_REG_QEEC_MAS_DIS, 0);
 }
 
@@ -996,6 +1023,8 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                           void *params)
 {
+       enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
+                                                         mlxsw_sp_qdisc);
        struct tc_tbf_qopt_offload_replace_params *p = params;
        u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
        int tclass_num;
@@ -1016,17 +1045,7 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
                /* check_params above was supposed to reject this value. */
                return -EINVAL;
 
-       /* Configure subgroup shaper, so that both UC and MC traffic is subject
-        * to shaping. That is unlike RED, however UC queue lengths are going to
-        * be different than MC ones due to different pool and quota
-        * configurations, so the configuration is not applicable. For shaper on
-        * the other hand, subjecting the overall stream to the configured
-        * shaper makes sense. Also note that that is what we do for
-        * ieee_setmaxrate().
-        */
-       return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
-                                            MLXSW_REG_QEEC_HR_SUBGROUP,
-                                            tclass_num, 0,
+       return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
                                             rate_kbps, burst_size);
 }