net/mlx5e: RX, Restrict bulk size for small Striding RQs
author     Tariq Toukan <tariqt@nvidia.com>
           Wed, 19 Jan 2022 16:35:42 +0000 (18:35 +0200)
committer  Saeed Mahameed <saeedm@nvidia.com>
           Thu, 17 Feb 2022 07:55:14 +0000 (23:55 -0800)
In RQs of type multi-packet WQE (Striding RQ), each WQE is relatively
large (typically 256KB), but the WQEs are relatively few (8 by
default).
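(With those defaults, that is roughly 8 x 256KB = 2MB of posted receive
buffer per RQ, spread over just 8 descriptors.)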

Re-mapping the descriptors' buffers before re-posting them is done via
UMR (User-Mode Memory Registration) operations.

On the one hand, posting UMR WQEs in bulk reduces communication overhead
with the HW and makes better use of its processing units.
On the other hand, delaying WQE repost operations on a small RQ
(say, of 4 WQEs) might drastically hurt performance, causing packet
drops due to a lack of receive buffers under high or bursty incoming
packet rates.

Here we restrict the bulk size for RQs that are too small. Effectively,
with the current constants, an RQ of size 4 (the minimum allowed) gets
no bulking, while larger RQs keep working with bulks of 2.
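
For illustration, with the helper added below, the threshold becomes
min(UMR_WQE_BULK, wq_sz / 2 - 1):

    wq_sz = 4:  min(2, 4/2 - 1) = min(2, 1) = 1   (no bulking)
    wq_sz = 8:  min(2, 8/2 - 1) = min(2, 3) = 2   (bulks of 2)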

Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e29ac77e9bec2b6a91c8a1fa8dc5ab39b604a61a..2704c7537481cffc3b58f8d7be196ca109303525 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -714,6 +714,7 @@ struct mlx5e_rq {
                        u8                     umr_in_progress;
                        u8                     umr_last_bulk;
                        u8                     umr_completed;
+                       u8                     min_wqe_bulk;
                        struct mlx5e_shampo_hd *shampo;
                } mpwqe;
        };
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 2d9707c024b05f3cbaa494c45e2029103034e263..0bd8698f7226f98d24e02f2bd02842689e0fcb8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -178,6 +178,12 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
                mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
 }
 
+u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
+{
+#define UMR_WQE_BULK (2)
+       return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
+}
+
 u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
                          struct mlx5e_params *params,
                          struct mlx5e_xsk_param *xsk)
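
As a sanity check, here is a minimal user-space sketch of the helper
above; min_t() is expanded into a plain comparison so it compiles
outside the kernel:

#include <stdio.h>

#define UMR_WQE_BULK (2)

/* Same formula as mlx5e_mpwqe_get_min_wqe_bulk(), with min_t()
 * expanded for user-space compilation. */
static unsigned int min_wqe_bulk(unsigned int wq_sz)
{
        unsigned int cap = wq_sz / 2 - 1;

        return UMR_WQE_BULK < cap ? UMR_WQE_BULK : cap;
}

int main(void)
{
        unsigned int wq_sz;

        for (wq_sz = 4; wq_sz <= 32; wq_sz *= 2)
                printf("wq_sz=%2u -> min_wqe_bulk=%u\n",
                       wq_sz, min_wqe_bulk(wq_sz));
        return 0;
}

With power-of-two ring sizes, only wq_sz = 4 falls under the
wq_sz / 2 - 1 cap; rings of 8 WQEs or more keep the default bulk of 2.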
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 433e6967692d9864ea77c6e096ba7c0c1ad545ea..47a368112e313bb4c4937d859ac96c230e1f409e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -129,6 +129,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
 u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params,
                                   struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz);
 u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
                          struct mlx5e_params *params,
                          struct mlx5e_xsk_param *xsk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b157c7aac4cac5382a7f5e2c87c6b279a83af992..b2ed2f6d4a9208aebfd17fd0c503cd1e37c39ee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -595,6 +595,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
                rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
                rq->mpwqe.num_strides =
                        BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+               rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
 
                rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3fe4f06f3e7187b363aea916bd177afd4ffb4bce..4cb7c7135b6aaab101229572adcdf2175e4c1c56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -960,8 +960,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
        if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
                rq->stats->congst_umr++;
 
-#define UMR_WQE_BULK (2)
-       if (likely(missing < UMR_WQE_BULK))
+       if (likely(missing < rq->mpwqe.min_wqe_bulk))
                return false;
 
        if (rq->page_pool)
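
And a hedged, stand-alone model (not kernel code) of the gate this hunk
changes, contrasting the old fixed bulk of 2 with the new per-RQ value
of 1 on a 4-WQE ring:

#include <stdio.h>

/* Stand-alone model of the repost gate only: "missing" is how many
 * WQEs the ring currently lacks; posting is deferred while
 * missing < min_wqe_bulk. */
static int should_post(unsigned int missing, unsigned int min_wqe_bulk)
{
        return missing >= min_wqe_bulk;
}

int main(void)
{
        unsigned int missing;

        /* wq_sz = 4: old fixed bulk is 2, new per-RQ bulk is 1. */
        for (missing = 0; missing <= 4; missing++)
                printf("missing=%u  old(bulk=2): %-5s  new(bulk=1): %s\n",
                       missing,
                       should_post(missing, 2) ? "post" : "defer",
                       should_post(missing, 1) ? "post" : "defer");
        return 0;
}

Under the old constant, a 4-WQE ring could idle with one WQE (a quarter
of its buffers) unposted; with a bulk of 1, any missing WQE is reposted
on the next poll.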