ixgbe: remove marketing names from busy poll code
author     Jacob Keller <jacob.e.keller@intel.com>    Tue, 1 Oct 2013 11:33:54 +0000 (04:33 -0700)
committer  David S. Miller <davem@davemloft.net>      Tue, 1 Oct 2013 16:49:49 +0000 (12:49 -0400)
This patch renames the LL_EXTENDED_STATS define and some of the functions used to
implement busy polling in the ixgbe driver, in order to remove the marketing
"low latency" naming, which obscures what the code actually does.

This furthers work requested by Linus Torvalds when the initial busy poll code
was merged into the kernel: the core code was renamed from "low latency" to
"busy poll" terminology, but the code in the ixgbe driver itself was never
renamed to reflect that change.
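
Concretely, the rename maps LL_EXTENDED_STATS to BP_EXTENDED_STATS,
ixgbe_qv_ll_polling() to ixgbe_qv_busy_polling(), and the per-queue ethtool
statistics strings from the "ll" infix (e.g. rx_queue_%u_ll_poll_yield) to a
"bp" infix (rx_queue_%u_bp_poll_yield).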

Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 3637841daea4bdc6abe08d2c8df79712c16181eb..dc1588ee264a31da9e7561904f4ced02b0861502 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -55,7 +55,7 @@
 #include <net/busy_poll.h>
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
 #endif
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
@@ -187,11 +187,11 @@ struct ixgbe_rx_buffer {
 struct ixgbe_queue_stats {
        u64 packets;
        u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
        u64 yields;
        u64 misses;
        u64 cleaned;
-#endif  /* LL_EXTENDED_STATS */
+#endif  /* BP_EXTENDED_STATS */
 };
 
 struct ixgbe_tx_queue_stats {
@@ -399,7 +399,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
                WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
                q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
                rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                q_vector->tx.ring->stats.yields++;
 #endif
        } else
@@ -432,7 +432,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
        if ((q_vector->state & IXGBE_QV_LOCKED)) {
                q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
                rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                q_vector->rx.ring->stats.yields++;
 #endif
        } else
@@ -457,7 +457,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 }
 
 /* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
        WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
        return q_vector->state & IXGBE_QV_USER_PEND;
@@ -487,7 +487,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
        return false;
 }
 
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
        return false;
 }
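
For context, a simplified sketch of how the driver's NAPI poll path uses the
helpers renamed above (illustrative only: the body is trimmed, and
ixgbe_poll_sketch is a made-up name standing in for the driver's real poll
routine). When a socket is busy polling a queue vector, ixgbe_qv_lock_napi()
fails and the poll routine backs off; that is exactly the event the yields
counter in the hunks above records.

	static int ixgbe_poll_sketch(struct napi_struct *napi, int budget)
	{
		struct ixgbe_q_vector *q_vector =
				container_of(napi, struct ixgbe_q_vector, napi);

		/* A busy-polling socket already holds the vector: yield to
		 * it.  ixgbe_qv_lock_napi() bumps stats.yields on this path.
		 */
		if (!ixgbe_qv_lock_napi(q_vector))
			return budget;

		/* ... clean the tx/rx rings here ... */

		ixgbe_qv_unlock_napi(q_vector);
		return 0;
	}
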
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 27c2032effc4ff9e48278937705096593a7a8e2a..90aac31b3551746d8fd3bd95d61d5000d2fc4a1a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1117,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                        data[i] = 0;
                        data[i+1] = 0;
                        data[i+2] = 0;
@@ -1132,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                data[i] = ring->stats.yields;
                data[i+1] = ring->stats.misses;
                data[i+2] = ring->stats.cleaned;
@@ -1145,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                        data[i] = 0;
                        data[i+1] = 0;
                        data[i+2] = 0;
@@ -1160,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                data[i] = ring->stats.yields;
                data[i+1] = ring->stats.misses;
                data[i+2] = ring->stats.cleaned;
@@ -1202,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-                       sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "tx_queue_%u_bp_napi_yield", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_ll_misses", i);
+                       sprintf(p, "tx_queue_%u_bp_misses", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_ll_cleaned", i);
+                       sprintf(p, "tx_queue_%u_bp_cleaned", i);
                        p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-                       sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "rx_queue_%u_bp_poll_yield", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_ll_misses", i);
+                       sprintf(p, "rx_queue_%u_bp_misses", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_ll_cleaned", i);
+                       sprintf(p, "rx_queue_%u_bp_cleaned", i);
                        p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
                        sprintf(p, "tx_pb_%u_pxon", i);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0ade0cd5ef53ffab28b3fd34136374bfe9f4b51e..43b777aad2880857682353f83cd946048ee2278b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1585,7 +1585,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
 
-       if (ixgbe_qv_ll_polling(q_vector))
+       if (ixgbe_qv_busy_polling(q_vector))
                netif_receive_skb(skb);
        else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
                napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2097,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
 
        ixgbe_for_each_ring(ring, q_vector->rx) {
                found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                if (found)
                        ring->stats.cleaned += found;
                else