git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blobdiff - drivers/net/ethernet/broadcom/bcmsysport.c
Merge tag 'mac80211-next-for-davem-2018-03-29' of git://git.kernel.org/pub/scm/linux...
[mirror_ubuntu-eoan-kernel.git] / drivers / net / ethernet / broadcom / bcmsysport.c
index f15a8fc6dfc97419f8e1492dd1717b9d2e562b84..4e26f606a7f227ca46f2139b7b04ca9fd46ffa67 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
+#include <linux/net_dim.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
@@ -574,21 +575,55 @@ static int bcm_sysport_set_wol(struct net_device *dev,
        return 0;
 }
 
+/* Program the RDMA mailbox-done interrupt with the current RX coalescing
+ * parameters held in priv->dim (coal_pkts / coal_usecs).  Per the comment
+ * in bcm_sysport_set_coalesce(), one hardware timeout tick is ~8.192 us
+ * (125 MHz reference clock divided by 1024), hence usecs * 1000 / 8192.
+ */
+static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv)
+{
+       u32 reg;
+
+       reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+       /* Clear both the frame-count threshold and the timeout fields */
+       reg &= ~(RDMA_INTR_THRESH_MASK |
+                RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
+       reg |= priv->dim.coal_pkts;
+       reg |= DIV_ROUND_UP(priv->dim.coal_usecs * 1000, 8192) <<
+                           RDMA_TIMEOUT_SHIFT;
+       rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+}
+
+/* Program one TX ring's interrupt-control register with the coalescing
+ * parameters held in ring->dim (coal_pkts / coal_usecs).  Uses the same
+ * ~8.192 us-per-tick timeout conversion as the RX path.
+ */
+static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring)
+{
+       struct bcm_sysport_priv *priv = ring->priv;
+       u32 reg;
+
+       reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
+       /* Clear both the frame-count threshold and the timeout fields */
+       reg &= ~(RING_INTR_THRESH_MASK |
+                RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
+       reg |= ring->dim.coal_pkts;
+       reg |= DIV_ROUND_UP(ring->dim.coal_usecs * 1000, 8192) <<
+                           RING_TIMEOUT_SHIFT;
+       tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
+}
+
 static int bcm_sysport_get_coalesce(struct net_device *dev,
                                    struct ethtool_coalesce *ec)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct bcm_sysport_tx_ring *ring;
+       unsigned int i;
        u32 reg;
 
+       /* TX settings are read back from ring 0's register only; NOTE(review):
+        * this assumes all TX rings are programmed identically — per-ring DIM
+        * may have diverged them.
+        */
        reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
 
        ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
        ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
+       /* Report adaptive TX coalescing as on if any ring has DIM enabled */
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               ring = &priv->tx_rings[i];
+               ec->use_adaptive_tx_coalesce |= ring->dim.use_dim;
+       }
 
        reg = rdma_readl(priv, RDMA_MBDONE_INTR);
 
        ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
        ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
+       ec->use_adaptive_rx_coalesce = priv->dim.use_dim;
 
        return 0;
 }
@@ -597,8 +632,8 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
                                    struct ethtool_coalesce *ec)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct bcm_sysport_tx_ring *ring;
        unsigned int i;
-       u32 reg;
 
        /* Base system clock is 125Mhz, DMA timeout is this reference clock
         * divided by 1024, which yield roughly 8.192 us, our maximum value has
@@ -615,22 +650,26 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
                return -EINVAL;
 
        for (i = 0; i < dev->num_tx_queues; i++) {
-               reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
-               reg &= ~(RING_INTR_THRESH_MASK |
-                        RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
-               reg |= ec->tx_max_coalesced_frames;
-               reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
-                        RING_TIMEOUT_SHIFT;
-               tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
+               ring = &priv->tx_rings[i];
+               ring->dim.coal_pkts = ec->tx_max_coalesced_frames;
+               ring->dim.coal_usecs = ec->tx_coalesce_usecs;
+               if (!ec->use_adaptive_tx_coalesce && ring->dim.use_dim) {
+                       ring->dim.coal_pkts = 1;
+                       ring->dim.coal_usecs = 0;
+               }
+               ring->dim.use_dim = ec->use_adaptive_tx_coalesce;
+               bcm_sysport_set_tx_coalesce(ring);
        }
 
-       reg = rdma_readl(priv, RDMA_MBDONE_INTR);
-       reg &= ~(RDMA_INTR_THRESH_MASK |
-                RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
-       reg |= ec->rx_max_coalesced_frames;
-       reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
-                           RDMA_TIMEOUT_SHIFT;
-       rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+       priv->dim.coal_usecs = ec->rx_coalesce_usecs;
+       priv->dim.coal_pkts = ec->rx_max_coalesced_frames;
+
+       if (!ec->use_adaptive_rx_coalesce && priv->dim.use_dim) {
+               priv->dim.coal_pkts = 1;
+               priv->dim.coal_usecs = 0;
+       }
+       priv->dim.use_dim = ec->use_adaptive_rx_coalesce;
+       bcm_sysport_set_rx_coalesce(priv);
 
        return 0;
 }
@@ -709,6 +748,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
        struct bcm_sysport_stats64 *stats64 = &priv->stats64;
        struct net_device *ndev = priv->netdev;
        unsigned int processed = 0, to_process;
+       unsigned int processed_bytes = 0;
        struct bcm_sysport_cb *cb;
        struct sk_buff *skb;
        unsigned int p_index;
@@ -800,6 +840,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                 */
                skb_pull(skb, sizeof(*rsb) + 2);
                len -= (sizeof(*rsb) + 2);
+               processed_bytes += len;
 
                /* UniMAC may forward CRC */
                if (priv->crc_fwd) {
@@ -824,6 +865,9 @@ next:
                        priv->rx_read_ptr = 0;
        }
 
+       priv->dim.packets = processed;
+       priv->dim.bytes = processed_bytes;
+
        return processed;
 }
 
@@ -855,10 +899,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
 static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                                             struct bcm_sysport_tx_ring *ring)
 {
-       unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
        unsigned int pkts_compl = 0, bytes_compl = 0;
        struct net_device *ndev = priv->netdev;
+       unsigned int txbds_processed = 0;
        struct bcm_sysport_cb *cb;
+       unsigned int txbds_ready;
+       unsigned int c_index;
        u32 hw_ind;
 
        /* Clear status before servicing to reduce spurious interrupts */
@@ -871,35 +917,31 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
        /* Compute how many descriptors have been processed since last call */
        hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
        c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
-       ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
-
-       last_c_index = ring->c_index;
-       num_tx_cbs = ring->size;
-
-       c_index &= (num_tx_cbs - 1);
-
-       if (c_index >= last_c_index)
-               last_tx_cn = c_index - last_c_index;
-       else
-               last_tx_cn = num_tx_cbs - last_c_index + c_index;
+       txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
 
        netif_dbg(priv, tx_done, ndev,
-                 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
-                 ring->index, c_index, last_tx_cn, last_c_index);
+                 "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
+                 ring->index, ring->c_index, c_index, txbds_ready);
 
-       while (last_tx_cn-- > 0) {
-               cb = ring->cbs + last_c_index;
+       while (txbds_processed < txbds_ready) {
+               cb = &ring->cbs[ring->clean_index];
                bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
 
                ring->desc_count++;
-               last_c_index++;
-               last_c_index &= (num_tx_cbs - 1);
+               txbds_processed++;
+
+               if (likely(ring->clean_index < ring->size - 1))
+                       ring->clean_index++;
+               else
+                       ring->clean_index = 0;
        }
 
        u64_stats_update_begin(&priv->syncp);
        ring->packets += pkts_compl;
        ring->bytes += bytes_compl;
        u64_stats_update_end(&priv->syncp);
+       ring->dim.packets = pkts_compl;
+       ring->dim.bytes = bytes_compl;
 
        ring->c_index = c_index;
 
@@ -945,6 +987,7 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
 {
        struct bcm_sysport_tx_ring *ring =
                container_of(napi, struct bcm_sysport_tx_ring, napi);
+       struct net_dim_sample dim_sample;
        unsigned int work_done = 0;
 
        work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
@@ -961,6 +1004,12 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
                return 0;
        }
 
+       if (ring->dim.use_dim) {
+               net_dim_sample(ring->dim.event_ctr, ring->dim.packets,
+                              ring->dim.bytes, &dim_sample);
+               net_dim(&ring->dim.dim, dim_sample);
+       }
+
        return budget;
 }
 
@@ -976,6 +1025,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
 {
        struct bcm_sysport_priv *priv =
                container_of(napi, struct bcm_sysport_priv, napi);
+       struct net_dim_sample dim_sample;
        unsigned int work_done = 0;
 
        work_done = bcm_sysport_desc_rx(priv, budget);
@@ -998,6 +1048,12 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
                intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
        }
 
+       if (priv->dim.use_dim) {
+               net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
+                              priv->dim.bytes, &dim_sample);
+               net_dim(&priv->dim.dim, dim_sample);
+       }
+
        return work_done;
 }
 
@@ -1016,6 +1072,40 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
        netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }
 
+/* net_dim deferred work for the RX path: look up the coalescing profile
+ * that the DIM algorithm selected (dim->profile_ix), apply it to the RDMA
+ * hardware, and re-arm the measurement state machine.
+ */
+static void bcm_sysport_dim_work(struct work_struct *work)
+{
+       struct net_dim *dim = container_of(work, struct net_dim, work);
+       struct bcm_sysport_net_dim *ndim =
+                       container_of(dim, struct bcm_sysport_net_dim, dim);
+       struct bcm_sysport_priv *priv =
+                       container_of(ndim, struct bcm_sysport_priv, dim);
+       struct net_dim_cq_moder cur_profile =
+                               net_dim_get_profile(dim->mode, dim->profile_ix);
+
+       priv->dim.coal_usecs = cur_profile.usec;
+       priv->dim.coal_pkts = cur_profile.pkts;
+
+       bcm_sysport_set_rx_coalesce(priv);
+       /* Restart DIM sampling now that the new profile is in effect */
+       dim->state = NET_DIM_START_MEASURE;
+}
+
+/* net_dim deferred work for a TX ring: apply the DIM-selected coalescing
+ * profile to that ring's interrupt-control register and re-arm the
+ * measurement state machine.  Mirrors bcm_sysport_dim_work() for RX.
+ */
+static void bcm_sysport_dim_tx_work(struct work_struct *work)
+{
+       struct net_dim *dim = container_of(work, struct net_dim, work);
+       struct bcm_sysport_net_dim *ndim =
+                       container_of(dim, struct bcm_sysport_net_dim, dim);
+       struct bcm_sysport_tx_ring *ring =
+                       container_of(ndim, struct bcm_sysport_tx_ring, dim);
+       struct net_dim_cq_moder cur_profile =
+                               net_dim_get_profile(dim->mode, dim->profile_ix);
+
+       ring->dim.coal_usecs = cur_profile.usec;
+       ring->dim.coal_pkts = cur_profile.pkts;
+
+       bcm_sysport_set_tx_coalesce(ring);
+       /* Restart DIM sampling now that the new profile is in effect */
+       dim->state = NET_DIM_START_MEASURE;
+}
+
 /* RX and misc interrupt routine */
 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
 {
@@ -1034,6 +1124,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        }
 
        if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
+               priv->dim.event_ctr++;
                if (likely(napi_schedule_prep(&priv->napi))) {
                        /* disable RX interrupts */
                        intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
@@ -1061,6 +1152,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
                        continue;
 
                txr = &priv->tx_rings[ring];
+               txr->dim.event_ctr++;
 
                if (likely(napi_schedule_prep(&txr->napi))) {
                        intrl2_0_mask_set(priv, ring_bit);
@@ -1093,6 +1185,7 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
                        continue;
 
                txr = &priv->tx_rings[ring];
+               txr->dim.event_ctr++;
 
                if (likely(napi_schedule_prep(&txr->napi))) {
                        intrl2_1_mask_set(priv, BIT(ring));
@@ -1358,6 +1451,16 @@ out:
                phy_print_status(phydev);
 }
 
+/* Initialize one DIM context (RX path or a TX ring): hook up the deferred
+ * work callback, select EQE-based moderation mode, and zero the event and
+ * traffic counters sampled from NAPI poll.
+ */
+static void bcm_sysport_init_dim(struct bcm_sysport_net_dim *dim,
+                                void (*cb)(struct work_struct *work))
+{
+       INIT_WORK(&dim->dim.work, cb);
+       dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+       dim->event_ctr = 0;
+       dim->packets = 0;
+       dim->bytes = 0;
+}
+
 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
                                    unsigned int index)
 {
@@ -1394,6 +1497,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
        netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
        ring->index = index;
        ring->size = size;
+       ring->clean_index = 0;
        ring->alloc_size = ring->size;
        ring->desc_cpu = p;
        ring->desc_count = ring->size;
@@ -1447,6 +1551,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
        reg |= (1 << index);
        tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
 
+       bcm_sysport_init_dim(&ring->dim, bcm_sysport_dim_tx_work);
        napi_enable(&ring->napi);
 
        netif_dbg(priv, hw, priv->netdev,
@@ -1477,6 +1582,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
                return;
 
        napi_disable(&ring->napi);
+       cancel_work_sync(&ring->dim.dim.work);
        netif_napi_del(&ring->napi);
 
        bcm_sysport_tx_clean(priv, ring);
@@ -1766,6 +1872,7 @@ static void bcm_sysport_netif_start(struct net_device *dev)
        struct bcm_sysport_priv *priv = netdev_priv(dev);
 
        /* Enable NAPI */
+       bcm_sysport_init_dim(&priv->dim, bcm_sysport_dim_work);
        napi_enable(&priv->napi);
 
        /* Enable RX interrupt and TX ring full interrupt */
@@ -1951,6 +2058,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
        /* stop all software from updating hardware */
        netif_tx_stop_all_queues(dev);
        napi_disable(&priv->napi);
+       cancel_work_sync(&priv->dim.dim.work);
        phy_stop(dev->phydev);
 
        /* mask all interrupts */