git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
igb: Use length to determine if descriptor is done
author	Alexander Duyck <alexander.h.duyck@intel.com>
	Tue, 7 Feb 2017 02:25:41 +0000 (18:25 -0800)
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
	Fri, 17 Mar 2017 19:11:44 +0000 (12:11 -0700)
This change makes it so that we use the length of the packet instead of the
DD status bit to determine if a new descriptor is ready to be processed.
The obvious advantage is that it cuts down on reads, since we don't really
need the DD bit at all if the size going from zero to a non-zero value is
enough to tell us that the packet has been completed.

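A minimal sketch of the idea, using hypothetical demo_* types rather than
the real union e1000_adv_rx_desc, just to show why a non-zero write-back
length can stand in for the DD bit:

#include <linux/types.h>
#include <asm/barrier.h>

/* simplified stand-in for the advanced Rx descriptor write-back format */
struct demo_rx_desc {
	__le16 length;		/* 0 until hardware writes the packet back */
	__le16 status_error;	/* DD bit lives here; no longer read first */
};

static bool demo_rx_desc_done(const struct demo_rx_desc *desc)
{
	if (!desc->length)
		return false;	/* no write-back yet, still owned by hardware */

	/*
	 * Order the length read before reads of the remaining write-back
	 * fields, for the same reason the driver keeps its dma_rmb().
	 */
	dma_rmb();
	return true;
}
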
In addition, I have updated the code so that we only reset the Rx descriptor
length for descriptor zero when resetting a ring, instead of having to do a
memset with 0 over the entire ring.  By doing this we can save some time on
initialization.

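A sketch of that second point, reusing the hypothetical demo_rx_desc type
from the sketch above plus an equally hypothetical demo_ring: only
descriptor 0 needs its length cleared when the ring is configured, because
the refill path clears the length of every descriptor it hands back to the
hardware, so a memset over the whole ring is unnecessary.

struct demo_ring {
	struct demo_rx_desc *desc;	/* descriptor ring memory */
	u16 count;			/* number of descriptors */
	u16 next_to_use;		/* next descriptor to refill */
	u16 next_to_clean;		/* next descriptor to poll */
};

static void demo_configure_rx_ring(struct demo_ring *ring)
{
	/* descriptor 0 is the first slot software will poll after reset */
	ring->desc[0].length = 0;
}

static void demo_refill_one(struct demo_ring *ring)
{
	struct demo_rx_desc *desc = &ring->desc[ring->next_to_use];

	/* ... attach a fresh receive buffer to this slot here ... */

	/* clear the length so a stale value is never mistaken for "done" */
	desc->length = 0;

	if (++ring->next_to_use == ring->count)
		ring->next_to_use = 0;
}
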
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c

diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 737b664d004cbb372222f6f63acc5c9f38ad4c95..3f5f7744c90f835324a1bf1457dabcff684c8ccc 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1811,7 +1811,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
        tx_ntc = tx_ring->next_to_clean;
        rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
 
-       while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
+       while (rx_desc->wb.upper.length) {
                /* check Rx buffer */
                rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cf7ee9cdac6f96df318fd7dcf3cdb2fc12b49b00..1d76d3a90a17f81f9ee410eef06185818cc54945 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3720,6 +3720,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
                           struct igb_ring *ring)
 {
        struct e1000_hw *hw = &adapter->hw;
+       union e1000_adv_rx_desc *rx_desc;
        u64 rdba = ring->dma;
        int reg_idx = ring->reg_idx;
        u32 srrctl = 0, rxdctl = 0;
@@ -3758,6 +3759,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
        rxdctl |= IGB_RX_HTHRESH << 8;
        rxdctl |= IGB_RX_WTHRESH << 16;
 
+       /* initialize Rx descriptor 0 */
+       rx_desc = IGB_RX_DESC(ring, 0);
+       rx_desc->wb.upper.length = 0;
+
        /* enable receive descriptor fetching */
        rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
        wr32(E1000_RXDCTL(reg_idx), rxdctl);
@@ -3973,9 +3978,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
        size = sizeof(struct igb_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);
 
-       /* Zero out the descriptor ring */
-       memset(rx_ring->desc, 0, rx_ring->size);
-
        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
@@ -7172,7 +7174,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 
                rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
-               if (!rx_desc->wb.upper.status_error)
+               if (!rx_desc->wb.upper.length)
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -7312,8 +7314,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
                        i -= rx_ring->count;
                }
 
-               /* clear the status bits for the next_to_use descriptor */
-               rx_desc->wb.upper.status_error = 0;
+               /* clear the length for the next_to_use descriptor */
+               rx_desc->wb.upper.length = 0;
 
                cleaned_count--;
        } while (cleaned_count);
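
For completeness, a sketch of how the clean path in the last two hunks fits
together, again with the hypothetical demo_* types from the sketches above
rather than the driver's real structures: the poll loop stops at the first
descriptor whose length is still zero, and it is the refill path that made
that length zero in the first place.

static int demo_clean_rx_irq(struct demo_ring *ring, int budget)
{
	int cleaned = 0;

	while (cleaned < budget) {
		struct demo_rx_desc *desc = &ring->desc[ring->next_to_clean];

		if (!desc->length)
			break;		/* hardware has not written back yet */

		/* order the length check before the rest of the descriptor */
		dma_rmb();

		/* ... hand the completed buffer up the stack here ... */

		if (++ring->next_to_clean == ring->count)
			ring->next_to_clean = 0;
		cleaned++;
	}

	/* ... refill the consumed slots, clearing each length as above ... */
	return cleaned;
}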