net: mhi: Get rid of local rx queue count
author     Loic Poulain <loic.poulain@linaro.org>
           Mon, 11 Jan 2021 18:07:42 +0000 (19:07 +0100)
committer  Jakub Kicinski <kuba@kernel.org>
           Sat, 30 Jan 2021 03:42:06 +0000 (19:42 -0800)
Use the new mhi_get_free_desc_count helper to track queue usage
instead of relying on the locally maintained rx_queued count.

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index abdd2baa312a47e45de88b90c3a6e2ae4dccc503..4f512531b7d0e8d721b40ae373b0ae50c7fdc938 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -25,7 +25,6 @@ struct mhi_net_stats {
        u64_stats_t tx_bytes;
        u64_stats_t tx_errors;
        u64_stats_t tx_dropped;
-       atomic_t rx_queued;
        struct u64_stats_sync tx_syncp;
        struct u64_stats_sync rx_syncp;
 };
@@ -138,9 +137,9 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 {
        struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
        struct sk_buff *skb = mhi_res->buf_addr;
-       int remaining;
+       int free_desc_count;
 
-       remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
+       free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
 
        if (unlikely(mhi_res->transaction_status)) {
                dev_kfree_skb_any(skb);
@@ -175,7 +174,7 @@ static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
        }
 
        /* Refill if RX buffers queue becomes low */
-       if (remaining <= mhi_netdev->rx_queue_sz / 2)
+       if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
                schedule_delayed_work(&mhi_netdev->rx_refill, 0);
 }
 
@@ -222,7 +221,7 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
        struct sk_buff *skb;
        int err;
 
-       while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
+       while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
                skb = netdev_alloc_skb(ndev, size);
                if (unlikely(!skb))
                        break;
@@ -235,8 +234,6 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
                        break;
                }
 
-               atomic_inc(&mhi_netdev->stats.rx_queued);
-
                /* Do not hog the CPU if rx buffers are consumed faster than
                 * queued (unlikely).
                 */
@@ -244,7 +241,7 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
        }
 
        /* If we're still starved of rx buffers, reschedule later */
-       if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
+       if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
                schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
 }
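
For reference, below is a consolidated sketch of the refill worker as it looks after this patch. It is reassembled from the hunks above, not copied from the tree: the function prologue (the container_of() setup, the buffer size taken from the MTU) and the exact error path (net_err_ratelimited(), dev_kfree_skb_any()) are assumptions outside the shown context, so treat them as illustrative rather than authoritative.

	/* Sketch only: loop and reschedule logic come from the hunks above;
	 * the prologue and error path are assumed, not part of this diff.
	 */
	static void mhi_net_rx_refill_work(struct work_struct *work)
	{
		struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
							      rx_refill.work);
		struct net_device *ndev = mhi_netdev->ndev;
		struct mhi_device *mdev = mhi_netdev->mdev;
		int size = READ_ONCE(ndev->mtu);
		struct sk_buff *skb;
		int err;

		/* Queue RX buffers until the MHI DL channel reports no free
		 * transfer descriptors, instead of comparing a driver-local
		 * rx_queued counter against rx_queue_sz.
		 */
		while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
			skb = netdev_alloc_skb(ndev, size);
			if (unlikely(!skb))
				break;

			err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
			if (unlikely(err)) {
				net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
						    ndev->name, err);
				dev_kfree_skb_any(skb);
				break;
			}

			/* Do not hog the CPU if rx buffers are consumed faster than
			 * queued (unlikely).
			 */
			cond_resched();
		}

		/* If every descriptor is still free, nothing could be queued:
		 * retry later instead of spinning.
		 */
		if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
			schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
	}

Note the inverted threshold in the completion callback: with rx_queue_sz descriptors in total, "queued buffers dropped to half or fewer" (old: remaining <= rx_queue_sz / 2) is the same condition as "free descriptors rose to half or more", hence the >= comparison on free_desc_count in the new code.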