git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
mt76: reduce locking in mt76_dma_tx_cleanup
author: Felix Fietkau <nbd@nbd.name>
Thu, 31 Jan 2019 21:39:32 +0000 (22:39 +0100)
committer: Felix Fietkau <nbd@nbd.name>
Wed, 1 May 2019 11:03:58 +0000 (13:03 +0200)
q->tail can be safely updated without locking, because there is no
concurrent access. If called from outside of the tasklet (for flushing),
the tasklet is always disabled.
q->queued can be safely read without locking, as long as the decrement
happens within the locked section.
This patch allows cleaning up tx packets outside of the section that holds
the queue lock, for improved performance.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
drivers/net/wireless/mediatek/mt76/dma.c

index 7145b75b6438c1be7abb28dcbe5c50a1722b4253..e4a5b34915bfddd194b78f0fb31dc5eade142752 100644 (file)
@@ -149,31 +149,29 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        struct mt76_queue *q = sq->q;
        struct mt76_queue_entry entry;
+       unsigned int n_swq_queued[4] = {};
+       unsigned int n_queued = 0;
        bool wake = false;
-       int last;
+       int i, last;
 
        if (!q)
                return;
 
-       spin_lock_bh(&q->lock);
        if (flush)
                last = -1;
        else
                last = readl(&q->regs->dma_idx);
 
-       while (q->queued && q->tail != last) {
+       while ((q->queued > n_queued) && q->tail != last) {
                mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
                if (entry.schedule)
-                       dev->q_tx[entry.qid].swq_queued--;
+                       n_swq_queued[entry.qid]++;
 
                q->tail = (q->tail + 1) % q->ndesc;
-               q->queued--;
+               n_queued++;
 
-               if (entry.skb) {
-                       spin_unlock_bh(&q->lock);
+               if (entry.skb)
                        dev->drv->tx_complete_skb(dev, qid, &entry);
-                       spin_lock_bh(&q->lock);
-               }
 
                if (entry.txwi) {
                        mt76_put_txwi(dev, entry.txwi);
@@ -184,6 +182,16 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
                        last = readl(&q->regs->dma_idx);
        }
 
+       spin_lock_bh(&q->lock);
+
+       q->queued -= n_queued;
+       for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
+               if (!n_swq_queued[i])
+                       continue;
+
+               dev->q_tx[i].swq_queued -= n_swq_queued[i];
+       }
+
        if (flush)
                mt76_dma_sync_idx(dev, q);