git.proxmox.com Git - mirror_ubuntu-kernels.git/commitdiff
net/mlx5e: Optimize modulo in mlx5e_select_queue
author: Maxim Mikityanskiy <maximmi@nvidia.com>
Tue, 25 Jan 2022 10:52:59 +0000 (12:52 +0200)
committer: Saeed Mahameed <saeedm@nvidia.com>
Tue, 15 Feb 2022 06:30:51 +0000 (22:30 -0800)
To improve the performance of the modulo operation (%), it's replaced by
subtracting the divisor in a loop. The modulo is used to fix up an
out-of-bounds value that might be returned by netdev_pick_tx or to
convert the queue number to the channel number when num_tcs > 1. Both
situations are unlikely, because XPS is configured not to pick higher
queues (qid >= num_channels) by default, so under normal circumstances
the flow won't go inside the loop, and it will be faster than %.

num_tcs == 8 adds at most 7 iterations to the loop. PTP adds at most 1
iteration to the loop. HTB would add at most 256 iterations (when
num_channels == 1), so there is an additional boundary check in the HTB
flow, which falls back to % if more than 7 iterations are expected.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
drivers/net/ethernet/mellanox/mlx5/core/en/selq.h

index b3ed5262d2a16392ba1638ac44e9b471cb7a0107..667bc95a0d44a4193edf22138612841b800e5142 100644 (file)
@@ -178,7 +178,8 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                 * So we can return a txq_ix that matches the channel and
                 * packet UP.
                 */
-               return txq_ix % selq->num_channels + up * selq->num_channels;
+               return mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels) +
+                       up * selq->num_channels;
        }
 
        if (unlikely(selq->is_htb)) {
@@ -198,7 +199,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                 * Driver to select these queues only at mlx5e_select_ptpsq()
                 * and mlx5e_select_htb_queue().
                 */
-               return txq_ix % selq->num_channels;
+               return mlx5e_txq_to_ch_ix_htb(txq_ix, selq->num_channels);
        }
 
        /* PTP is enabled */
@@ -214,7 +215,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
         * If netdev_pick_tx() picks ptp_channel, switch to a regular queue,
         * because driver should select the PTP only at mlx5e_select_ptpsq().
         */
-       txq_ix %= selq->num_channels;
+       txq_ix = mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels);
 
        if (selq->num_tcs <= 1)
                return txq_ix;
index b1c73b509f6b0b967f5ef148a4f2b5ab519d4c14..6c070141d8f11e7976aafde6c16fa472576dbeb4 100644 (file)
@@ -25,6 +25,26 @@ void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bo
 void mlx5e_selq_apply(struct mlx5e_selq *selq);
 void mlx5e_selq_cancel(struct mlx5e_selq *selq);
 
+/* Map a txq index to a channel index without the % operator. With default
+ * XPS configuration txq < num_channels already, so the (unlikely) loop body
+ * never executes; worst case is bounded by the callers (num_tcs adds at most
+ * 7 iterations, PTP at most 1 — see commit message above).
+ */
+static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels)
+{
+       while (unlikely(txq >= num_channels))
+               txq -= num_channels;
+       return txq;
+}
+
+/* HTB variant of mlx5e_txq_to_ch_ix(): HTB queue numbers can exceed
+ * num_channels by a large factor (up to 256 iterations when
+ * num_channels == 1), so if txq >= num_channels * 8 — i.e. the subtract
+ * loop would need more than 7 iterations — fall back to the real modulo.
+ */
+static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels)
+{
+       if (unlikely(txq >= num_channels)) {
+               if (unlikely(txq >= num_channels << 3))
+                       txq %= num_channels;
+               else
+                       do
+                               txq -= num_channels;
+                       while (txq >= num_channels);
+       }
+       return txq;
+}
+
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev);