Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 41e32257a4e873e37d4daf24798400c394416b69..2dc78d7e297ae4749d9fa5be1caa69cdfab8ea7d 100644
@@ -42,6 +42,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/sctp.h>
 #include <linux/ipv6.h>
 #include <linux/slab.h>
 #include <net/checksum.h>
@@ -97,9 +98,7 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 /* forward decls */
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
-static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
-                              u32 itr_reg);
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
 
 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
                                           struct ixgbevf_ring *rx_ring,
@@ -115,7 +114,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
 }
 
-/*
+/**
  * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
  * @adapter: pointer to adapter struct
  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -146,18 +145,18 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
        }
 }
 
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
                                               struct ixgbevf_tx_buffer
                                               *tx_buffer_info)
 {
        if (tx_buffer_info->dma) {
                if (tx_buffer_info->mapped_as_page)
-                       dma_unmap_page(&adapter->pdev->dev,
+                       dma_unmap_page(tx_ring->dev,
                                       tx_buffer_info->dma,
                                       tx_buffer_info->length,
                                       DMA_TO_DEVICE);
                else
-                       dma_unmap_single(&adapter->pdev->dev,
+                       dma_unmap_single(tx_ring->dev,
                                         tx_buffer_info->dma,
                                         tx_buffer_info->length,
                                         DMA_TO_DEVICE);
@@ -175,27 +174,20 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-                        (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#ifdef MAX_SKB_FRAGS
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-       MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
-#else
-#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
-#endif
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 
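The simplified macros above lean on DIV_ROUND_UP for the worst-case descriptor
count. A short illustration of the arithmetic (not part of the patch), assuming
IXGBE_MAX_TXD_PWR is 14 (16 KiB of data per descriptor) and 4 KiB pages:

	/* TXD_USE_COUNT(PAGE_SIZE) = DIV_ROUND_UP(4096, 16384) = 1, so
	 * MAX_SKB_FRAGS page-sized fragments need MAX_SKB_FRAGS descriptors,
	 * and the "+ 4" in DESC_NEEDED leaves headroom for the linear
	 * skb->data area, a context descriptor, and some slack. */
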
 static void ixgbevf_tx_timeout(struct net_device *netdev);
 
 /**
  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: structure containing interrupt and ring information
  * @tx_ring: tx ring to clean
  **/
-static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *tx_ring)
 {
-       struct net_device *netdev = adapter->netdev;
-       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbevf_adapter *adapter = q_vector->adapter;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned int i, eop, count = 0;
@@ -206,10 +198,10 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
-       eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+       eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
 
        while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
-              (count < tx_ring->work_limit)) {
+              (count < tx_ring->count)) {
                bool cleaned = false;
                rmb(); /* read buffer_info after eop_desc */
                /* eop could change between read and DD-check */
@@ -217,7 +209,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
                        goto cont_loop;
                for ( ; !cleaned; count++) {
                        struct sk_buff *skb;
-                       tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+                       tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
                        skb = tx_buffer_info->skb;
@@ -234,7 +226,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
                                total_bytes += bytecount;
                        }
 
-                       ixgbevf_unmap_and_free_tx_resource(adapter,
+                       ixgbevf_unmap_and_free_tx_resource(tx_ring,
                                                           tx_buffer_info);
 
                        tx_desc->wb.status = 0;
@@ -246,37 +238,25 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
 
 cont_loop:
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
-               eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+               eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
        }
 
        tx_ring->next_to_clean = i;
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-       if (unlikely(count && netif_carrier_ok(netdev) &&
+       if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
-#ifdef HAVE_TX_MQ
-               if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
-                   !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
-                       netif_wake_subqueue(netdev, tx_ring->queue_index);
-                       ++adapter->restart_queue;
-               }
-#else
-               if (netif_queue_stopped(netdev) &&
+               if (__netif_subqueue_stopped(tx_ring->netdev,
+                                            tx_ring->queue_index) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
-                       netif_wake_queue(netdev);
+                       netif_wake_subqueue(tx_ring->netdev,
+                                           tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
-#endif
-       }
-
-       /* re-arm the interrupt */
-       if ((count >= tx_ring->work_limit) &&
-           (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
-               IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
        }
 
        u64_stats_update_begin(&tx_ring->syncp);
@@ -284,7 +264,7 @@ cont_loop:
        tx_ring->total_packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
 
-       return count < tx_ring->work_limit;
+       return count < tx_ring->count;
 }
 
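The TX_WAKE_THRESHOLD test above is the wake half of the usual stop/wake
hysteresis. A minimal sketch of the complementary stop-side check (hypothetical
helper name, illustration only; the real check lives in the transmit path and
uses the driver's IXGBE_DESC_UNUSED macro):

	static int sketch_maybe_stop_tx(struct ixgbevf_ring *tx_ring, u16 size)
	{
		if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
			return 0;
		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
		/* pairs with the smp_mb() before the wake test in clean_tx_irq */
		smp_mb();
		/* cleanup may have freed descriptors between the check and the stop */
		if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
			return -EBUSY;
		netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
		return 0;
	}

Stopping early and waking only once DESC_NEEDED * 2 descriptors are free keeps
the queue from bouncing rapidly between the stopped and running states.
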
 /**
@@ -304,13 +284,10 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
-       if (is_vlan && test_bit(tag, adapter->active_vlans))
+       if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
                __vlan_hwaccel_put_tag(skb, tag);
 
-       if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
-                       napi_gro_receive(&q_vector->napi, skb);
-       else
-                       netif_rx(skb);
+       napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
@@ -320,12 +297,13 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
  * @skb: skb currently being received and modified
  **/
 static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+                                      struct ixgbevf_ring *ring,
                                       u32 status_err, struct sk_buff *skb)
 {
        skb_checksum_none_assert(skb);
 
        /* Rx csum disabled */
-       if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+       if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;
 
        /* if IP and error */
@@ -360,52 +338,21 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbevf_rx_buffer *bi;
        struct sk_buff *skb;
-       unsigned int i;
-       unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+       unsigned int i = rx_ring->next_to_use;
 
-       i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];
 
        while (cleaned_count--) {
-               rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
-
-               if (!bi->page_dma &&
-                   (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
-                       if (!bi->page) {
-                               bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
-                               if (!bi->page) {
-                                       adapter->alloc_rx_page_failed++;
-                                       goto no_buffers;
-                               }
-                               bi->page_offset = 0;
-                       } else {
-                               /* use a half page if we're re-using */
-                               bi->page_offset ^= (PAGE_SIZE / 2);
-                       }
-
-                       bi->page_dma = dma_map_page(&pdev->dev, bi->page,
-                                                   bi->page_offset,
-                                                   (PAGE_SIZE / 2),
-                                                   DMA_FROM_DEVICE);
-               }
-
+               rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
                skb = bi->skb;
                if (!skb) {
-                       skb = netdev_alloc_skb(adapter->netdev,
-                                                              bufsz);
-
+                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                                       rx_ring->rx_buf_len);
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
 
-                       /*
-                        * Make buffer alignment 2 beyond a 16 byte boundary
-                        * this will result in a 16 byte aligned IP header after
-                        * the 14 byte MAC header is removed
-                        */
-                       skb_reserve(skb, NET_IP_ALIGN);
-
                        bi->skb = skb;
                }
                if (!bi->dma) {
@@ -413,14 +360,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
                }
-               /* Refresh the desc even if buffer_addrs didn't change because
-                * each write-back erases this info. */
-               if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-                       rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
-                       rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-               } else {
-                       rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
-               }
+               rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
 
                i++;
                if (i == rx_ring->count)
@@ -431,36 +371,22 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
-               if (i-- == 0)
-                       i = (rx_ring->count - 1);
 
                ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
        }
 }
 
 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
-                                            u64 qmask)
+                                            u32 qmask)
 {
-       u32 mask;
        struct ixgbe_hw *hw = &adapter->hw;
 
-       mask = (qmask & 0xFFFFFFFF);
-       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-}
-
-static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
 }
 
 static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *rx_ring,
-                                int *work_done, int work_to_do)
+                                int budget)
 {
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct pci_dev *pdev = adapter->pdev;
@@ -469,36 +395,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
        struct sk_buff *skb;
        unsigned int i;
        u32 len, staterr;
-       u16 hdr_info;
-       bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
        i = rx_ring->next_to_clean;
-       rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+       rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
        while (staterr & IXGBE_RXD_STAT_DD) {
-               u32 upper_len = 0;
-               if (*work_done >= work_to_do)
+               if (!budget)
                        break;
-               (*work_done)++;
+               budget--;
 
                rmb(); /* read descriptor and rx_buffer_info after status DD */
-               if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-                       hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
-                       len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-                              IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-                       if (hdr_info & IXGBE_RXDADV_SPH)
-                               adapter->rx_hdr_split++;
-                       if (len > IXGBEVF_RX_HDR_SIZE)
-                               len = IXGBEVF_RX_HDR_SIZE;
-                       upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-               } else {
-                       len = le16_to_cpu(rx_desc->wb.upper.length);
-               }
-               cleaned = true;
+               len = le16_to_cpu(rx_desc->wb.upper.length);
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;
@@ -511,46 +422,19 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                        skb_put(skb, len);
                }
 
-               if (upper_len) {
-                       dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
-                                      PAGE_SIZE / 2, DMA_FROM_DEVICE);
-                       rx_buffer_info->page_dma = 0;
-                       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                                          rx_buffer_info->page,
-                                          rx_buffer_info->page_offset,
-                                          upper_len);
-
-                       if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
-                           (page_count(rx_buffer_info->page) != 1))
-                               rx_buffer_info->page = NULL;
-                       else
-                               get_page(rx_buffer_info->page);
-
-                       skb->len += upper_len;
-                       skb->data_len += upper_len;
-                       skb->truesize += upper_len;
-               }
-
                i++;
                if (i == rx_ring->count)
                        i = 0;
 
-               next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+               next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;
 
                next_buffer = &rx_ring->rx_buffer_info[i];
 
                if (!(staterr & IXGBE_RXD_STAT_EOP)) {
-                       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-                               rx_buffer_info->skb = next_buffer->skb;
-                               rx_buffer_info->dma = next_buffer->dma;
-                               next_buffer->skb = skb;
-                               next_buffer->dma = 0;
-                       } else {
-                               skb->next = next_buffer->skb;
-                               skb->next->prev = skb;
-                       }
+                       skb->next = next_buffer->skb;
+                       skb->next->prev = skb;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }
@@ -561,7 +445,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                        goto next_desc;
                }
 
-               ixgbevf_rx_checksum(adapter, staterr, skb);
+               ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
 
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
@@ -576,7 +460,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                        if (header_fixup_len < 14)
                                skb_push(skb, header_fixup_len);
                }
-               skb->protocol = eth_type_trans(skb, adapter->netdev);
+               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 
                ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
 
@@ -608,92 +492,52 @@ next_desc:
        rx_ring->total_bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
 
-       return cleaned;
+       return !!budget;
 }
 
 /**
- * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * ixgbevf_poll - NAPI polling callback
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
  *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
- **/
-static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
-{
-       struct ixgbevf_q_vector *q_vector =
-               container_of(napi, struct ixgbevf_q_vector, napi);
-       struct ixgbevf_adapter *adapter = q_vector->adapter;
-       struct ixgbevf_ring *rx_ring = NULL;
-       int work_done = 0;
-       long r_idx;
-
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       rx_ring = &(adapter->rx_ring[r_idx]);
-
-       ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-
-       /* If all Rx work done, exit the polling mode */
-       if (work_done < budget) {
-               napi_complete(napi);
-               if (adapter->itr_setting & 1)
-                       ixgbevf_set_itr_msix(q_vector);
-               if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
-                       ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
-       }
-
-       return work_done;
-}
-
-/**
- * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function will clean more than one rx queue associated with a
+ * This function will clean one or more rings associated with a
  * q_vector.
  **/
-static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+static int ixgbevf_poll(struct napi_struct *napi, int budget)
 {
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
-       struct ixgbevf_ring *rx_ring = NULL;
-       int work_done = 0, i;
-       long r_idx;
-       u64 enable_mask = 0;
+       struct ixgbevf_ring *ring;
+       int per_ring_budget;
+       bool clean_complete = true;
+
+       ixgbevf_for_each_ring(ring, q_vector->tx)
+               clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
 
        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
-       budget /= (q_vector->rxr_count ?: 1);
-       budget = max(budget, 1);
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       for (i = 0; i < q_vector->rxr_count; i++) {
-               rx_ring = &(adapter->rx_ring[r_idx]);
-               ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-               enable_mask |= rx_ring->v_idx;
-               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-                                     r_idx + 1);
-       }
-
-#ifndef HAVE_NETDEV_NAPI_LIST
-       if (!netif_running(adapter->netdev))
-               work_done = 0;
-
-#endif
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       rx_ring = &(adapter->rx_ring[r_idx]);
-
-       /* If all Rx work done, exit the polling mode */
-       if (work_done < budget) {
-               napi_complete(napi);
-               if (adapter->itr_setting & 1)
-                       ixgbevf_set_itr_msix(q_vector);
-               if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
-                       ixgbevf_irq_enable_queues(adapter, enable_mask);
-       }
+       if (q_vector->rx.count > 1)
+               per_ring_budget = max(budget/q_vector->rx.count, 1);
+       else
+               per_ring_budget = budget;
+
+       ixgbevf_for_each_ring(ring, q_vector->rx)
+               clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
+                                                      per_ring_budget);
+
+       /* If all work not completed, return budget and keep polling */
+       if (!clean_complete)
+               return budget;
+       /* all work done, exit the polling mode */
+       napi_complete(napi);
+       if (adapter->rx_itr_setting & 1)
+               ixgbevf_set_itr(q_vector);
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+               ixgbevf_irq_enable_queues(adapter,
+                                         1 << q_vector->v_idx);
 
-       return work_done;
+       return 0;
 }
 
 
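The budget split in ixgbevf_poll above keeps each Rx ring's share at one packet
or more so polling is not ended prematurely. A standalone illustration of the
same arithmetic (plain C, hypothetical helper, not part of the patch):

	/* Split a NAPI budget across nr_rx_rings, never handing out zero. */
	static int per_ring_budget(int budget, int nr_rx_rings)
	{
		if (nr_rx_rings <= 1)
			return budget;
		return (budget / nr_rx_rings) > 0 ? budget / nr_rx_rings : 1;
	}

For example, per_ring_budget(64, 3) == 21, so three Rx rings sharing one vector
may each clean up to 21 packets per poll.
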
@@ -707,56 +551,49 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
 {
        struct ixgbevf_q_vector *q_vector;
-       struct ixgbe_hw *hw = &adapter->hw;
-       int i, j, q_vectors, v_idx, r_idx;
-       u32 mask;
+       int q_vectors, v_idx;
 
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+       adapter->eims_enable_mask = 0;
 
        /*
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+               struct ixgbevf_ring *ring;
                q_vector = adapter->q_vector[v_idx];
-               /* XXX for_each_set_bit(...) */
-               r_idx = find_first_bit(q_vector->rxr_idx,
-                                      adapter->num_rx_queues);
-
-               for (i = 0; i < q_vector->rxr_count; i++) {
-                       j = adapter->rx_ring[r_idx].reg_idx;
-                       ixgbevf_set_ivar(adapter, 0, j, v_idx);
-                       r_idx = find_next_bit(q_vector->rxr_idx,
-                                             adapter->num_rx_queues,
-                                             r_idx + 1);
-               }
-               r_idx = find_first_bit(q_vector->txr_idx,
-                                      adapter->num_tx_queues);
-
-               for (i = 0; i < q_vector->txr_count; i++) {
-                       j = adapter->tx_ring[r_idx].reg_idx;
-                       ixgbevf_set_ivar(adapter, 1, j, v_idx);
-                       r_idx = find_next_bit(q_vector->txr_idx,
-                                             adapter->num_tx_queues,
-                                             r_idx + 1);
+
+               ixgbevf_for_each_ring(ring, q_vector->rx)
+                       ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+               ixgbevf_for_each_ring(ring, q_vector->tx)
+                       ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+               if (q_vector->tx.ring && !q_vector->rx.ring) {
+                       /* tx only vector */
+                       if (adapter->tx_itr_setting == 1)
+                               q_vector->itr = IXGBE_10K_ITR;
+                       else
+                               q_vector->itr = adapter->tx_itr_setting;
+               } else {
+                       /* rx or rx/tx vector */
+                       if (adapter->rx_itr_setting == 1)
+                               q_vector->itr = IXGBE_20K_ITR;
+                       else
+                               q_vector->itr = adapter->rx_itr_setting;
                }
 
-               /* if this is a tx only vector halve the interrupt rate */
-               if (q_vector->txr_count && !q_vector->rxr_count)
-                       q_vector->eitr = (adapter->eitr_param >> 1);
-               else if (q_vector->rxr_count)
-                       /* rx only */
-                       q_vector->eitr = adapter->eitr_param;
+               /* add q_vector eims value to global eims_enable_mask */
+               adapter->eims_enable_mask |= 1 << v_idx;
 
-               ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+               ixgbevf_write_eitr(q_vector);
        }
 
        ixgbevf_set_ivar(adapter, -1, 1, v_idx);
-
-       /* set up to autoclear timer, and the vectors */
-       mask = IXGBE_EIMS_ENABLE_MASK;
-       mask &= ~IXGBE_EIMS_OTHER;
-       IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+       /* setup eims_other and add value to global eims_enable_mask */
+       adapter->eims_other = 1 << v_idx;
+       adapter->eims_enable_mask |= adapter->eims_other;
 }
 
 enum latency_range {
@@ -768,11 +605,8 @@ enum latency_range {
 
 /**
  * ixgbevf_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @eitr: eitr setting (ints per sec) to give last timeslice
- * @itr_setting: current throttle rate in ints/second
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
  *
  *      Stores a new ITR value based on packets and byte
  *      counts during the last interrupt.  The advantage of per interrupt
@@ -782,17 +616,17 @@ enum latency_range {
  *      on testing data as well as attempting to minimize response time
  *      while increasing bulk throughput.
  **/
-static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
-                            u32 eitr, u8 itr_setting,
-                            int packets, int bytes)
+static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
+                              struct ixgbevf_ring_container *ring_container)
 {
-       unsigned int retval = itr_setting;
+       int bytes = ring_container->total_bytes;
+       int packets = ring_container->total_packets;
        u32 timepassed_us;
        u64 bytes_perint;
+       u8 itr_setting = ring_container->itr;
 
        if (packets == 0)
-               goto update_itr_done;
-
+               return;
 
        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
@@ -800,46 +634,48 @@ static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
-       timepassed_us = 1000000/eitr;
+       timepassed_us = q_vector->itr >> 2;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */
 
        switch (itr_setting) {
        case lowest_latency:
-               if (bytes_perint > adapter->eitr_low)
-                       retval = low_latency;
+               if (bytes_perint > 10)
+                       itr_setting = low_latency;
                break;
        case low_latency:
-               if (bytes_perint > adapter->eitr_high)
-                       retval = bulk_latency;
-               else if (bytes_perint <= adapter->eitr_low)
-                       retval = lowest_latency;
+               if (bytes_perint > 20)
+                       itr_setting = bulk_latency;
+               else if (bytes_perint <= 10)
+                       itr_setting = lowest_latency;
                break;
        case bulk_latency:
-               if (bytes_perint <= adapter->eitr_high)
-                       retval = low_latency;
+               if (bytes_perint <= 20)
+                       itr_setting = low_latency;
                break;
        }
 
-update_itr_done:
-       return retval;
+       /* clear work counters since we have the values we need */
+       ring_container->total_bytes = 0;
+       ring_container->total_packets = 0;
+
+       /* write updated itr to ring container */
+       ring_container->itr = itr_setting;
 }
 
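The thresholds above are expressed in bytes per microsecond of interrupt
interval. A worked example (illustration only, assuming the usual EITR encoding
where IXGBE_20K_ITR == 200 and q_vector->itr >> 2 yields microseconds):

	/* timepassed_us = 200 >> 2 = 50, i.e. a 50 usec interval (20k ints/s).
	 * Receiving 1500 bytes in that window gives bytes_perint = 30, which is
	 * above the "> 20" threshold, so a ring sitting in low_latency is moved
	 * to bulk_latency (fewer, larger interrupt batches). */
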
 /**
  * ixgbevf_write_eitr - write VTEITR register in hardware specific way
- * @adapter: pointer to adapter struct
- * @v_idx: vector index into q_vector array
- * @itr_reg: new value to be written in *register* format, not ints/s
+ * @q_vector: structure containing interrupt and ring information
  *
  * This function is made to be called by ethtool and by the driver
  * when it needs to update VTEITR registers at runtime.  Hardware
  * specific quirks/differences are taken care of here.
  */
-static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
-                              u32 itr_reg)
+void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
 {
+       struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
-
-       itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
+       int v_idx = q_vector->v_idx;
+       u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
 
        /*
         * set the WDIS bit to not clear the timer bits and cause an
@@ -850,84 +686,49 @@ static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
 }
 
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
 {
-       struct ixgbevf_adapter *adapter = q_vector->adapter;
-       u32 new_itr;
-       u8 current_itr, ret_itr;
-       int i, r_idx, v_idx = q_vector->v_idx;
-       struct ixgbevf_ring *rx_ring, *tx_ring;
-
-       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-       for (i = 0; i < q_vector->txr_count; i++) {
-               tx_ring = &(adapter->tx_ring[r_idx]);
-               ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
-                                            q_vector->tx_itr,
-                                            tx_ring->total_packets,
-                                            tx_ring->total_bytes);
-               /* if the result for this queue would decrease interrupt
-                * rate for this vector then use that result */
-               q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
-                                   q_vector->tx_itr - 1 : ret_itr);
-               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-                                     r_idx + 1);
-       }
-
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       for (i = 0; i < q_vector->rxr_count; i++) {
-               rx_ring = &(adapter->rx_ring[r_idx]);
-               ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
-                                            q_vector->rx_itr,
-                                            rx_ring->total_packets,
-                                            rx_ring->total_bytes);
-               /* if the result for this queue would decrease interrupt
-                * rate for this vector then use that result */
-               q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
-                                   q_vector->rx_itr - 1 : ret_itr);
-               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-                                     r_idx + 1);
-       }
-
-       current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+       u32 new_itr = q_vector->itr;
+       u8 current_itr;
+
+       ixgbevf_update_itr(q_vector, &q_vector->tx);
+       ixgbevf_update_itr(q_vector, &q_vector->rx);
+
+       current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
 
        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
-               new_itr = 100000;
+               new_itr = IXGBE_100K_ITR;
                break;
        case low_latency:
-               new_itr = 20000; /* aka hwitr = ~200 */
+               new_itr = IXGBE_20K_ITR;
                break;
        case bulk_latency:
        default:
-               new_itr = 8000;
+               new_itr = IXGBE_8K_ITR;
                break;
        }
 
-       if (new_itr != q_vector->eitr) {
-               u32 itr_reg;
-
-               /* save the algorithm value here, not the smoothed one */
-               q_vector->eitr = new_itr;
+       if (new_itr != q_vector->itr) {
                /* do an exponential smoothing */
-               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
-               itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
-               ixgbevf_write_eitr(adapter, v_idx, itr_reg);
+               new_itr = (10 * new_itr * q_vector->itr) /
+                         ((9 * new_itr) + q_vector->itr);
+
+               /* save the algorithm value here */
+               q_vector->itr = new_itr;
+
+               ixgbevf_write_eitr(q_vector);
        }
 }
 
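The smoothing step above eases the interval toward the newly chosen rate rather
than jumping to it. A worked example (illustration only, assuming
IXGBE_8K_ITR == 500 and IXGBE_20K_ITR == 200):

	/* current q_vector->itr = 500, target new_itr = 200:
	 *   new_itr = (10 * 200 * 500) / ((9 * 200) + 500)
	 *           = 1000000 / 2300
	 *           = 434,
	 * so the programmed interval moves only part of the way toward 200 on
	 * each adjustment instead of switching rates abruptly. */
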
 static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
 {
-       struct net_device *netdev = data;
-       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+       struct ixgbevf_adapter *adapter = data;
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 eicr;
        u32 msg;
        bool got_ack = false;
 
-       eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
-       IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
-
        if (!hw->mbx.ops.check_for_ack(hw))
                got_ack = true;
 
@@ -956,63 +757,24 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
        if (got_ack)
                hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
 
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
-{
-       struct ixgbevf_q_vector *q_vector = data;
-       struct ixgbevf_adapter  *adapter = q_vector->adapter;
-       struct ixgbevf_ring     *tx_ring;
-       int i, r_idx;
-
-       if (!q_vector->txr_count)
-               return IRQ_HANDLED;
-
-       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-       for (i = 0; i < q_vector->txr_count; i++) {
-               tx_ring = &(adapter->tx_ring[r_idx]);
-               ixgbevf_clean_tx_irq(adapter, tx_ring);
-               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-                                     r_idx + 1);
-       }
-
-       if (adapter->itr_setting & 1)
-               ixgbevf_set_itr_msix(q_vector);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
        return IRQ_HANDLED;
 }
 
+
 /**
- * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rings - MSI-X interrupt handler for a q_vector's rings
  * @irq: unused
  * @data: pointer to our q_vector struct for this interrupt vector
  **/
-static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
 {
        struct ixgbevf_q_vector *q_vector = data;
-       struct ixgbevf_adapter  *adapter = q_vector->adapter;
-       struct ixgbe_hw *hw = &adapter->hw;
-       struct ixgbevf_ring  *rx_ring;
-       int r_idx;
-
-       if (!q_vector->rxr_count)
-               return IRQ_HANDLED;
-
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       rx_ring = &(adapter->rx_ring[r_idx]);
-       /* disable interrupts on this vector only */
-       IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
-       napi_schedule(&q_vector->napi);
-
-
-       return IRQ_HANDLED;
-}
 
-static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
-{
-       ixgbevf_msix_clean_rx(irq, data);
-       ixgbevf_msix_clean_tx(irq, data);
+       /* EIAM disabled interrupts (on this vector) for us */
+       if (q_vector->rx.ring || q_vector->tx.ring)
+               napi_schedule(&q_vector->napi);
 
        return IRQ_HANDLED;
 }
@@ -1022,9 +784,9 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
 {
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-       set_bit(r_idx, q_vector->rxr_idx);
-       q_vector->rxr_count++;
-       a->rx_ring[r_idx].v_idx = 1 << v_idx;
+       a->rx_ring[r_idx].next = q_vector->rx.ring;
+       q_vector->rx.ring = &a->rx_ring[r_idx];
+       q_vector->rx.count++;
 }
 
 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
@@ -1032,9 +794,9 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
 {
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-       set_bit(t_idx, q_vector->txr_idx);
-       q_vector->txr_count++;
-       a->tx_ring[t_idx].v_idx = 1 << v_idx;
+       a->tx_ring[t_idx].next = q_vector->tx.ring;
+       q_vector->tx.ring = &a->tx_ring[t_idx];
+       q_vector->tx.count++;
 }
 
 /**
@@ -1110,37 +872,30 @@ out:
 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       irqreturn_t (*handler)(int, void *);
-       int i, vector, q_vectors, err;
+       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+       int vector, err;
        int ri = 0, ti = 0;
 
-       /* Decrement for Other and TCP Timer vectors */
-       q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
-                                         ? &ixgbevf_msix_clean_many : \
-                         (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
-                         (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
-                         NULL)
        for (vector = 0; vector < q_vectors; vector++) {
-               handler = SET_HANDLER(adapter->q_vector[vector]);
-
-               if (handler == &ixgbevf_msix_clean_rx) {
-                       sprintf(adapter->name[vector], "%s-%s-%d",
-                               netdev->name, "rx", ri++);
-               } else if (handler == &ixgbevf_msix_clean_tx) {
-                       sprintf(adapter->name[vector], "%s-%s-%d",
-                               netdev->name, "tx", ti++);
-               } else if (handler == &ixgbevf_msix_clean_many) {
-                       sprintf(adapter->name[vector], "%s-%s-%d",
-                               netdev->name, "TxRx", vector);
+               struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
+               struct msix_entry *entry = &adapter->msix_entries[vector];
+
+               if (q_vector->tx.ring && q_vector->rx.ring) {
+                       snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+                                "%s-%s-%d", netdev->name, "TxRx", ri++);
+                       ti++;
+               } else if (q_vector->rx.ring) {
+                       snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+                                "%s-%s-%d", netdev->name, "rx", ri++);
+               } else if (q_vector->tx.ring) {
+                       snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+                                "%s-%s-%d", netdev->name, "tx", ti++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
-               err = request_irq(adapter->msix_entries[vector].vector,
-                                 handler, 0, adapter->name[vector],
-                                 adapter->q_vector[vector]);
+               err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
+                                 q_vector->name, q_vector);
                if (err) {
                        hw_dbg(&adapter->hw,
                               "request_irq failed for MSIX interrupt "
@@ -1149,9 +904,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
                }
        }
 
-       sprintf(adapter->name[vector], "%s:mbx", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
-                         &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+                         &ixgbevf_msix_mbx, 0, netdev->name, adapter);
        if (err) {
                hw_dbg(&adapter->hw,
                       "request_irq for msix_mbx failed: %d\n", err);
@@ -1161,9 +915,11 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
        return 0;
 
 free_queue_irqs:
-       for (i = vector - 1; i >= 0; i--)
-               free_irq(adapter->msix_entries[--vector].vector,
-                        &(adapter->q_vector[i]));
+       while (vector) {
+               vector--;
+               free_irq(adapter->msix_entries[vector].vector,
+                        adapter->q_vector[vector]);
+       }
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
@@ -1176,11 +932,10 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
 
        for (i = 0; i < q_vectors; i++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
-               bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
-               bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
-               q_vector->rxr_count = 0;
-               q_vector->txr_count = 0;
-               q_vector->eitr = adapter->eitr_param;
+               q_vector->rx.ring = NULL;
+               q_vector->tx.ring = NULL;
+               q_vector->rx.count = 0;
+               q_vector->tx.count = 0;
        }
 }
 
@@ -1206,17 +961,20 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
 
 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
 {
-       struct net_device *netdev = adapter->netdev;
        int i, q_vectors;
 
        q_vectors = adapter->num_msix_vectors;
-
        i = q_vectors - 1;
 
-       free_irq(adapter->msix_entries[i].vector, netdev);
+       free_irq(adapter->msix_entries[i].vector, adapter);
        i--;
 
        for (; i >= 0; i--) {
+               /* free only the irqs that were actually requested */
+               if (!adapter->q_vector[i]->rx.ring &&
+                   !adapter->q_vector[i]->tx.ring)
+                       continue;
+
                free_irq(adapter->msix_entries[i].vector,
                         adapter->q_vector[i]);
        }
@@ -1230,10 +988,12 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
  **/
 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
 {
-       int i;
        struct ixgbe_hw *hw = &adapter->hw;
+       int i;
 
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
 
        IXGBE_WRITE_FLUSH(hw);
 
@@ -1245,23 +1005,13 @@ static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
  * ixgbevf_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
  **/
-static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
-                                     bool queues, bool flush)
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 mask;
-       u64 qmask;
-
-       mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
-       qmask = ~0;
-
-       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-
-       if (queues)
-               ixgbevf_irq_enable_queues(adapter, qmask);
 
-       if (flush)
-               IXGBE_WRITE_FLUSH(hw);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
 }
 
 /**
@@ -1311,29 +1061,14 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
 
        srrctl = IXGBE_SRRCTL_DROP_EN;
 
-       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-               u16 bufsz = IXGBEVF_RXBUFFER_2048;
-               /* grow the amount we can receive on large page machines */
-               if (bufsz < (PAGE_SIZE / 2))
-                       bufsz = (PAGE_SIZE / 2);
-               /* cap the bufsz at our largest descriptor size */
-               bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
-
-               srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-               srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-               srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
-                          IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-                          IXGBE_SRRCTL_BSIZEHDR_MASK);
-       } else {
-               srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+       srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 
-               if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
-                       srrctl |= IXGBEVF_RXBUFFER_2048 >>
-                               IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-               else
-                       srrctl |= rx_ring->rx_buf_len >>
-                               IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-       }
+       if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+               srrctl |= IXGBEVF_RXBUFFER_2048 >>
+                       IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       else
+               srrctl |= rx_ring->rx_buf_len >>
+                       IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
 }
 
@@ -1353,36 +1088,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
        u32 rdlen;
        int rx_buf_len;
 
-       /* Decide whether to use packet split mode or not */
-       if (netdev->mtu > ETH_DATA_LEN) {
-               if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
-                       adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-               else
-                       adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
-       } else {
-               if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
-                       adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
-               else
-                       adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
-       }
-
-       /* Set the RX buffer length according to the mode */
-       if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-               /* PSRTYPE must be initialized in 82599 */
-               u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
-                       IXGBE_PSRTYPE_UDPHDR |
-                       IXGBE_PSRTYPE_IPV4HDR |
-                       IXGBE_PSRTYPE_IPV6HDR |
-                       IXGBE_PSRTYPE_L2HDR;
-               IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
-               rx_buf_len = IXGBEVF_RX_HDR_SIZE;
-       } else {
-               IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
-               if (netdev->mtu <= ETH_DATA_LEN)
-                       rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
-               else
-                       rx_buf_len = ALIGN(max_frame, 1024);
-       }
+       /* PSRTYPE must be initialized in 82599 */
+       IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+       if (netdev->mtu <= ETH_DATA_LEN)
+               rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+       else
+               rx_buf_len = ALIGN(max_frame, 1024);
 
        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1493,15 +1204,8 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               struct napi_struct *napi;
                q_vector = adapter->q_vector[q_idx];
-               if (!q_vector->rxr_count)
-                       continue;
-               napi = &q_vector->napi;
-               if (q_vector->rxr_count > 1)
-                       napi->poll = &ixgbevf_clean_rxonly_many;
-
-               napi_enable(napi);
+               napi_enable(&q_vector->napi);
        }
 }
 
@@ -1513,8 +1217,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
-               if (!q_vector->rxr_count)
-                       continue;
                napi_disable(&q_vector->napi);
        }
 }
@@ -1532,9 +1234,8 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
        ixgbevf_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct ixgbevf_ring *ring = &adapter->rx_ring[i];
-               ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
-               ring->next_to_use = ring->count - 1;
-               writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+               ixgbevf_alloc_rx_buffers(adapter, ring,
+                                        IXGBE_DESC_UNUSED(ring));
        }
 }
 
@@ -1658,10 +1359,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
        ixgbevf_save_reset_stats(adapter);
        ixgbevf_init_last_counter_stats(adapter);
 
-       /* bring the link up in the watchdog, this could race with our first
-        * link up interrupt but shouldn't be a problem */
-       adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
-       adapter->link_check_timeout = jiffies;
        mod_timer(&adapter->watchdog_timer, jiffies);
 }
 
@@ -1676,7 +1373,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_VTEICR);
 
-       ixgbevf_irq_enable(adapter, true, true);
+       ixgbevf_irq_enable(adapter);
 }
 
 /**
@@ -1714,14 +1411,6 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
                                dev_kfree_skb(this);
                        } while (skb);
                }
-               if (!rx_buffer_info->page)
-                       continue;
-               dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
-                              PAGE_SIZE / 2, DMA_FROM_DEVICE);
-               rx_buffer_info->page_dma = 0;
-               put_page(rx_buffer_info->page);
-               rx_buffer_info->page = NULL;
-               rx_buffer_info->page_offset = 0;
        }
 
        size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@ -1758,7 +1447,7 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
 
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+               ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
 
        size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -1891,10 +1580,9 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
 {
        int err, vector_threshold;
 
-       /* We'll want at least 3 (vector_threshold):
-        * 1) TxQ[0] Cleanup
-        * 2) RxQ[0] Cleanup
-        * 3) Other (Link Status Change, etc.)
+       /* We'll want at least 2 (vector_threshold):
+        * 1) TxQ[0] + RxQ[0] handler
+        * 2) Other (Link Status Change, etc.)
         */
        vector_threshold = MIN_MSIX_COUNT;
 
@@ -1933,8 +1621,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
        }
 }
 
-/*
- * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
  * @adapter: board private structure to initialize
  *
  * This is the top level queue allocation routine.  The order here is very
@@ -1949,8 +1637,6 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
        /* Start with base case */
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;
-       adapter->num_rx_pools = adapter->num_rx_queues;
-       adapter->num_rx_queues_per_pool = 1;
 }
 
 /**
@@ -1979,12 +1665,16 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
                adapter->tx_ring[i].count = adapter->tx_ring_count;
                adapter->tx_ring[i].queue_index = i;
                adapter->tx_ring[i].reg_idx = i;
+               adapter->tx_ring[i].dev = &adapter->pdev->dev;
+               adapter->tx_ring[i].netdev = adapter->netdev;
        }
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].count = adapter->rx_ring_count;
                adapter->rx_ring[i].queue_index = i;
                adapter->rx_ring[i].reg_idx = i;
+               adapter->rx_ring[i].dev = &adapter->pdev->dev;
+               adapter->rx_ring[i].netdev = adapter->netdev;
        }
 
        return 0;
@@ -2011,10 +1701,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
         * It's easy to be greedy for MSI-X vectors, but it really
         * doesn't do us much good if we have a lot more vectors
         * than CPU's.  So let's be conservative and only ask for
-        * (roughly) twice the number of vectors as there are CPU's.
+        * (roughly) the same number of vectors as there are CPU's.
+        * The default is to use pairs of vectors.
         */
-       v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
-                      (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+       v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+       v_budget = min_t(int, v_budget, num_online_cpus());
+       v_budget += NON_Q_VECTORS;
 
        /* A failure in MSI-X entry allocation isn't fatal, but it does
         * mean we disable MSI-X capabilities of the adapter. */
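A quick check of the new vector budget (illustration only, assuming
NON_Q_VECTORS is 1, the mailbox/other vector):

	/* With the driver's default of one Tx and one Rx queue:
	 *   v_budget = max(1, 1) = 1, capped at num_online_cpus(), plus 1 = 2,
	 * which lines up with the "at least 2" vector_threshold noted earlier. */
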
@@ -2045,12 +1737,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 {
        int q_idx, num_q_vectors;
        struct ixgbevf_q_vector *q_vector;
-       int napi_vectors;
-       int (*poll)(struct napi_struct *, int);
 
        num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       napi_vectors = adapter->num_rx_queues;
-       poll = &ixgbevf_clean_rxonly;
 
        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
@@ -2058,10 +1746,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->v_idx = q_idx;
-               q_vector->eitr = adapter->eitr_param;
-               if (q_idx < napi_vectors)
-                       netif_napi_add(adapter->netdev, &q_vector->napi,
-                                      (*poll), 64);
+               netif_napi_add(adapter->netdev, &q_vector->napi,
+                              ixgbevf_poll, 64);
                adapter->q_vector[q_idx] = q_vector;
        }
 
@@ -2208,20 +1894,13 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
        }
 
        /* Enable dynamic interrupt throttling rates */
-       adapter->eitr_param = 20000;
-       adapter->itr_setting = 1;
-
-       /* set defaults for eitr in MegaBytes */
-       adapter->eitr_low = 10;
-       adapter->eitr_high = 20;
+       adapter->rx_itr_setting = 1;
+       adapter->tx_itr_setting = 1;
 
        /* set default ring sizes */
        adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
        adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
 
-       /* enable rx csum by default */
-       adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-
        set_bit(__IXGBEVF_DOWN, &adapter->state);
        return 0;
 
@@ -2281,7 +1960,7 @@ static void ixgbevf_watchdog(unsigned long data)
 {
        struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
        struct ixgbe_hw *hw = &adapter->hw;
-       u64 eics = 0;
+       u32 eics = 0;
        int i;
 
        /*
@@ -2295,11 +1974,11 @@ static void ixgbevf_watchdog(unsigned long data)
        /* get one bit for every active tx/rx interrupt vector */
        for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
                struct ixgbevf_q_vector *qv = adapter->q_vector[i];
-               if (qv->rxr_count || qv->txr_count)
-                       eics |= (1 << i);
+               if (qv->rx.ring || qv->tx.ring)
+                       eics |= 1 << i;
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
 
 watchdog_short_circuit:
        schedule_work(&adapter->watchdog_task);
@@ -2469,7 +2148,6 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-       tx_ring->work_limit = tx_ring->count;
        return 0;
 
 err:
@@ -2673,7 +2351,7 @@ static int ixgbevf_open(struct net_device *netdev)
        if (err)
                goto err_req_irq;
 
-       ixgbevf_irq_enable(adapter, true, true);
+       ixgbevf_irq_enable(adapter);
 
        return 0;
 
@@ -2715,172 +2393,152 @@ static int ixgbevf_close(struct net_device *netdev)
        return 0;
 }
 
-static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
-                      struct ixgbevf_ring *tx_ring,
-                      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
+                               u32 vlan_macip_lens, u32 type_tucmd,
+                               u32 mss_l4len_idx)
 {
        struct ixgbe_adv_tx_context_desc *context_desc;
-       unsigned int i;
-       int err;
-       struct ixgbevf_tx_buffer *tx_buffer_info;
-       u32 vlan_macip_lens = 0, type_tucmd_mlhl;
-       u32 mss_l4len_idx, l4len;
+       u16 i = tx_ring->next_to_use;
 
-       if (skb_is_gso(skb)) {
-               if (skb_header_cloned(skb)) {
-                       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-                       if (err)
-                               return err;
-               }
-               l4len = tcp_hdrlen(skb);
-               *hdr_len += l4len;
-
-               if (skb->protocol == htons(ETH_P_IP)) {
-                       struct iphdr *iph = ip_hdr(skb);
-                       iph->tot_len = 0;
-                       iph->check = 0;
-                       tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                                iph->daddr, 0,
-                                                                IPPROTO_TCP,
-                                                                0);
-                       adapter->hw_tso_ctxt++;
-               } else if (skb_is_gso_v6(skb)) {
-                       ipv6_hdr(skb)->payload_len = 0;
-                       tcp_hdr(skb)->check =
-                           ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                            &ipv6_hdr(skb)->daddr,
-                                            0, IPPROTO_TCP, 0);
-                       adapter->hw_tso6_ctxt++;
-               }
+       context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
 
-               i = tx_ring->next_to_use;
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
-
-               /* VLAN MACLEN IPLEN */
-               if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-                       vlan_macip_lens |=
-                               (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-               vlan_macip_lens |= ((skb_network_offset(skb)) <<
-                                   IXGBE_ADVTXD_MACLEN_SHIFT);
-               *hdr_len += skb_network_offset(skb);
-               vlan_macip_lens |=
-                       (skb_transport_header(skb) - skb_network_header(skb));
-               *hdr_len +=
-                       (skb_transport_header(skb) - skb_network_header(skb));
-               context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-               context_desc->seqnum_seed = 0;
-
-               /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-               type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
-                                   IXGBE_ADVTXD_DTYP_CTXT);
-
-               if (skb->protocol == htons(ETH_P_IP))
-                       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-               context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
-               /* MSS L4LEN IDX */
-               mss_l4len_idx =
-                       (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
-               mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
-               /* use index 1 for TSO */
-               mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-               context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-               tx_buffer_info->time_stamp = jiffies;
-               tx_buffer_info->next_to_watch = i;
+       /* set bits to identify this as an advanced context descriptor */
+       type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 
-               i++;
-               if (i == tx_ring->count)
-                       i = 0;
-               tx_ring->next_to_use = i;
+       context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
+       context_desc->seqnum_seed       = 0;
+       context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
+       context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
+}
 
-               return true;
+static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
+                      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+       u32 vlan_macip_lens, type_tucmd;
+       u32 mss_l4len_idx, l4len;
+
+       if (!skb_is_gso(skb))
+               return 0;
+
+       if (skb_header_cloned(skb)) {
+               int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+               if (err)
+                       return err;
+       }
+
+       /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+       type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+       if (skb->protocol == htons(ETH_P_IP)) {
+               struct iphdr *iph = ip_hdr(skb);
+               iph->tot_len = 0;
+               iph->check = 0;
+               tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+                                                        iph->daddr, 0,
+                                                        IPPROTO_TCP,
+                                                        0);
+               type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+       } else if (skb_is_gso_v6(skb)) {
+               ipv6_hdr(skb)->payload_len = 0;
+               tcp_hdr(skb)->check =
+                   ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                    &ipv6_hdr(skb)->daddr,
+                                    0, IPPROTO_TCP, 0);
        }
 
-       return false;
+       /* compute header lengths */
+       l4len = tcp_hdrlen(skb);
+       *hdr_len = skb_transport_offset(skb) + l4len;
+
+       /* mss_l4len_idx: use 1 as index for TSO */
+       mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+       mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+       mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+       /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+       vlan_macip_lens = skb_network_header_len(skb);
+       vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+       ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+                           type_tucmd, mss_l4len_idx);
+
+       return 1;
 }
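For reference, a standalone sketch of how the two context-descriptor words built in ixgbevf_tso() pack together; the shift constants are assumed to match ixgbe_type.h, and the header sizes and MSS are made-up example values:

#include <stdio.h>
#include <stdint.h>

#define IXGBE_ADVTXD_MACLEN_SHIFT	9	/* assumed values from ixgbe_type.h */
#define IXGBE_ADVTXD_L4LEN_SHIFT	8
#define IXGBE_ADVTXD_MSS_SHIFT		16
#define IXGBE_ADVTXD_IDX_SHIFT		4

int main(void)
{
	uint32_t mac_len = 14, ip_hlen = 20, l4len = 20, mss = 1448;	/* example IPv4/TCP frame */
	uint32_t mss_l4len_idx, vlan_macip_lens;

	/* MSS, L4 header length and context index 1 (TSO) in one word */
	mss_l4len_idx  = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= mss << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* IP header length in the low bits, MAC header length above it */
	vlan_macip_lens  = ip_hlen;
	vlan_macip_lens |= mac_len << IXGBE_ADVTXD_MACLEN_SHIFT;

	printf("mss_l4len_idx   = 0x%08x\n", mss_l4len_idx);
	printf("vlan_macip_lens = 0x%08x\n", vlan_macip_lens);
	return 0;
}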
 
-static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
-                           struct ixgbevf_ring *tx_ring,
+static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
                            struct sk_buff *skb, u32 tx_flags)
 {
-       struct ixgbe_adv_tx_context_desc *context_desc;
-       unsigned int i;
-       struct ixgbevf_tx_buffer *tx_buffer_info;
-       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
 
-       if (skb->ip_summed == CHECKSUM_PARTIAL ||
-           (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
-               i = tx_ring->next_to_use;
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
-
-               if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-                       vlan_macip_lens |= (tx_flags &
-                                           IXGBE_TX_FLAGS_VLAN_MASK);
-               vlan_macip_lens |= (skb_network_offset(skb) <<
-                                   IXGBE_ADVTXD_MACLEN_SHIFT);
-               if (skb->ip_summed == CHECKSUM_PARTIAL)
-                       vlan_macip_lens |= (skb_transport_header(skb) -
-                                           skb_network_header(skb));
-
-               context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
-               context_desc->seqnum_seed = 0;
-
-               type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
-                                   IXGBE_ADVTXD_DTYP_CTXT);
-
-               if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       switch (skb->protocol) {
-                       case __constant_htons(ETH_P_IP):
-                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-                               if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-                                       type_tucmd_mlhl |=
-                                           IXGBE_ADVTXD_TUCMD_L4T_TCP;
-                               break;
-                       case __constant_htons(ETH_P_IPV6):
-                               /* XXX what about other V6 headers?? */
-                               if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
-                                       type_tucmd_mlhl |=
-                                               IXGBE_ADVTXD_TUCMD_L4T_TCP;
-                               break;
-                       default:
-                               if (unlikely(net_ratelimit())) {
-                                       pr_warn("partial checksum but "
-                                               "proto=%x!\n", skb->protocol);
-                               }
-                               break;
-                       }
-               }
 
-               context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-               /* use index zero for tx checksum offload */
-               context_desc->mss_l4len_idx = 0;
 
-               tx_buffer_info->time_stamp = jiffies;
-               tx_buffer_info->next_to_watch = i;
+       u32 vlan_macip_lens = 0;
+       u32 mss_l4len_idx = 0;
+       u32 type_tucmd = 0;
 
-               adapter->hw_csum_tx_good++;
-               i++;
-               if (i == tx_ring->count)
-                       i = 0;
-               tx_ring->next_to_use = i;
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               u8 l4_hdr = 0;
+               switch (skb->protocol) {
+               case __constant_htons(ETH_P_IP):
+                       vlan_macip_lens |= skb_network_header_len(skb);
+                       type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+                       l4_hdr = ip_hdr(skb)->protocol;
+                       break;
+               case __constant_htons(ETH_P_IPV6):
+                       vlan_macip_lens |= skb_network_header_len(skb);
+                       l4_hdr = ipv6_hdr(skb)->nexthdr;
+                       break;
+               default:
+                       if (unlikely(net_ratelimit())) {
+                               dev_warn(tx_ring->dev,
+                                "partial checksum but proto=%x!\n",
+                                skb->protocol);
+                       }
+                       break;
+               }
 
-               return true;
+               switch (l4_hdr) {
+               case IPPROTO_TCP:
+                       type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+                       mss_l4len_idx = tcp_hdrlen(skb) <<
+                                       IXGBE_ADVTXD_L4LEN_SHIFT;
+                       break;
+               case IPPROTO_SCTP:
+                       type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+                       mss_l4len_idx = sizeof(struct sctphdr) <<
+                                       IXGBE_ADVTXD_L4LEN_SHIFT;
+                       break;
+               case IPPROTO_UDP:
+                       mss_l4len_idx = sizeof(struct udphdr) <<
+                                       IXGBE_ADVTXD_L4LEN_SHIFT;
+                       break;
+               default:
+                       if (unlikely(net_ratelimit())) {
+                               dev_warn(tx_ring->dev,
+                                "partial checksum but l4 proto=%x!\n",
+                                l4_hdr);
+                       }
+                       break;
+               }
        }
 
-       return false;
+       /* vlan_macip_lens: MACLEN, VLAN tag */
+       vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+       ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+                           type_tucmd, mss_l4len_idx);
+
+       return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
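A similar sketch for the non-TSO checksum path above, showing that UDP records only a header length (its L4T type code is 0) while TCP and SCTP also set an L4T bit; the register constants are assumed from ixgbe_type.h and the packet is a made-up example:

#include <stdio.h>
#include <stdint.h>

#define IXGBE_ADVTXD_TUCMD_IPV4		0x00000400	/* assumed values from ixgbe_type.h */
#define IXGBE_ADVTXD_TUCMD_L4T_UDP	0x00000000
#define IXGBE_ADVTXD_TUCMD_L4T_TCP	0x00000800
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP	0x00001000
#define IXGBE_ADVTXD_L4LEN_SHIFT	8

enum { EX_TCP = 6, EX_UDP = 17, EX_SCTP = 132 };	/* IANA protocol numbers */

int main(void)
{
	int l4_hdr = EX_UDP;				/* hypothetical IPv4/UDP skb */
	uint32_t type_tucmd = IXGBE_ADVTXD_TUCMD_IPV4;
	uint32_t mss_l4len_idx = 0;

	switch (l4_hdr) {
	case EX_TCP:
		type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		mss_l4len_idx = 20 << IXGBE_ADVTXD_L4LEN_SHIFT;	/* tcp_hdrlen() */
		break;
	case EX_SCTP:
		type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
		mss_l4len_idx = 12 << IXGBE_ADVTXD_L4LEN_SHIFT;	/* sizeof(struct sctphdr) */
		break;
	case EX_UDP:
		mss_l4len_idx = 8 << IXGBE_ADVTXD_L4LEN_SHIFT;	/* sizeof(struct udphdr) */
		break;
	}

	printf("type_tucmd = 0x%08x, mss_l4len_idx = 0x%08x\n", type_tucmd, mss_l4len_idx);
	return 0;
}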
 
-static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
-                         struct ixgbevf_ring *tx_ring,
+static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
                          struct sk_buff *skb, u32 tx_flags,
                          unsigned int first)
 {
-       struct pci_dev *pdev = adapter->pdev;
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned int len;
        unsigned int total = skb->len;
@@ -2899,12 +2558,11 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 
                tx_buffer_info->length = size;
                tx_buffer_info->mapped_as_page = false;
-               tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
+               tx_buffer_info->dma = dma_map_single(tx_ring->dev,
                                                     skb->data + offset,
                                                     size, DMA_TO_DEVICE);
-               if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+               if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
                        goto dma_error;
-               tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
 
                len -= size;
@@ -2929,12 +2587,12 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
 
                        tx_buffer_info->length = size;
                        tx_buffer_info->dma =
-                               skb_frag_dma_map(&adapter->pdev->dev, frag,
+                               skb_frag_dma_map(tx_ring->dev, frag,
                                                 offset, size, DMA_TO_DEVICE);
                        tx_buffer_info->mapped_as_page = true;
-                       if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+                       if (dma_mapping_error(tx_ring->dev,
+                                             tx_buffer_info->dma))
                                goto dma_error;
-                       tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
 
                        len -= size;
@@ -2955,15 +2613,15 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
                i = i - 1;
        tx_ring->tx_buffer_info[i].skb = skb;
        tx_ring->tx_buffer_info[first].next_to_watch = i;
+       tx_ring->tx_buffer_info[first].time_stamp = jiffies;
 
        return count;
 
 dma_error:
-       dev_err(&pdev->dev, "TX DMA map failed\n");
+       dev_err(tx_ring->dev, "TX DMA map failed\n");
 
        /* clear timestamp and dma mappings for failed tx_buffer_info map */
        tx_buffer_info->dma = 0;
-       tx_buffer_info->time_stamp = 0;
        tx_buffer_info->next_to_watch = 0;
        count--;
 
@@ -2974,14 +2632,13 @@ dma_error:
                if (i < 0)
                        i += tx_ring->count;
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+               ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
 
        return count;
 }
 
-static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
-                            struct ixgbevf_ring *tx_ring, int tx_flags,
+static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
                             int count, u32 paylen, u8 hdr_len)
 {
        union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -2998,28 +2655,31 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
        if (tx_flags & IXGBE_TX_FLAGS_VLAN)
                cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
 
+       if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+               olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+
        if (tx_flags & IXGBE_TX_FLAGS_TSO) {
                cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
 
-               olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-                       IXGBE_ADVTXD_POPTS_SHIFT;
-
                /* use index 1 context for tso */
                olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
                if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-                       olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
-                               IXGBE_ADVTXD_POPTS_SHIFT;
+                       olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
+
+       }
 
-       } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-               olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
-                       IXGBE_ADVTXD_POPTS_SHIFT;
+       /*
+        * Check Context must be set if Tx switch is enabled, which it
+        * always is for the case where virtual functions are running
+        */
+       olinfo_status |= IXGBE_ADVTXD_CC;
 
        olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
        i = tx_ring->next_to_use;
        while (count--) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+               tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
                tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
                tx_desc->read.cmd_type_len =
                        cpu_to_le32(cmd_type_len | tx_buffer_info->length);
@@ -3031,24 +2691,14 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
 
        tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
 
-       /*
-        * Force memory writes to complete before letting h/w
-        * know there are new descriptors to fetch.  (Only
-        * applicable for weak-ordered memory model archs,
-        * such as IA-64).
-        */
-       wmb();
-
        tx_ring->next_to_use = i;
-       writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
 
-static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
-                                  struct ixgbevf_ring *tx_ring, int size)
+static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
-       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+       struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
 
-       netif_stop_subqueue(netdev, tx_ring->queue_index);
+       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
@@ -3060,17 +2710,16 @@ static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
                return -EBUSY;
 
        /* A reprieve! - use start_queue because it doesn't call schedule */
-       netif_start_subqueue(netdev, tx_ring->queue_index);
+       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++adapter->restart_queue;
        return 0;
 }
 
-static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
-                                struct ixgbevf_ring *tx_ring, int size)
+static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
        if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
                return 0;
-       return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+       return __ixgbevf_maybe_stop_tx(tx_ring, size);
 }
 
 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -3081,54 +2730,66 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        unsigned int tx_flags = 0;
        u8 hdr_len = 0;
        int r_idx = 0, tso;
-       int count = 0;
-
-       unsigned int f;
+       u16 count = TXD_USE_COUNT(skb_headlen(skb));
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+       unsigned short f;
+#endif
 
        tx_ring = &adapter->tx_ring[r_idx];
 
+       /*
+        * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+        *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
+        *       + 2 desc gap to keep tail from touching head,
+        *       + 1 desc for context descriptor,
+        * otherwise try next time
+        */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+       count += skb_shinfo(skb)->nr_frags;
+#endif
+       if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
+               adapter->tx_busy++;
+               return NETDEV_TX_BUSY;
+       }
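To make the worst-case descriptor arithmetic in the comment above concrete, a small sketch; IXGBE_MAX_DATA_PER_TXD is assumed to be 16K here, and the head length and fragment count are example values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define IXGBE_MAX_DATA_PER_TXD	(1 << 14)	/* assumed 16K per data descriptor */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int headlen = 1514;	/* linear part of a full-size frame */
	unsigned int nr_frags = 2;	/* paged fragments; PAGE_SIZE <= MAX_DATA_PER_TXD case */
	unsigned int count;

	count = TXD_USE_COUNT(headlen) + nr_frags;

	/* +2 gap so tail never touches head, +1 for the context descriptor */
	printf("data descriptors: %u, free slots required: %u\n", count, count + 3);
	return 0;
}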
+
        if (vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb);
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }
 
-       /* four things can cause us to need a context descriptor */
-       if (skb_is_gso(skb) ||
-           (skb->ip_summed == CHECKSUM_PARTIAL) ||
-           (tx_flags & IXGBE_TX_FLAGS_VLAN))
-               count++;
-
-       count += TXD_USE_COUNT(skb_headlen(skb));
-       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-               count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
-
-       if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
-               adapter->tx_busy++;
-               return NETDEV_TX_BUSY;
-       }
-
        first = tx_ring->next_to_use;
 
        if (skb->protocol == htons(ETH_P_IP))
                tx_flags |= IXGBE_TX_FLAGS_IPV4;
-       tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+       tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
        if (tso)
-               tx_flags |= IXGBE_TX_FLAGS_TSO;
-       else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
-                (skb->ip_summed == CHECKSUM_PARTIAL))
+               tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
+       else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
                tx_flags |= IXGBE_TX_FLAGS_CSUM;
 
-       ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
-                        ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+       ixgbevf_tx_queue(tx_ring, tx_flags,
+                        ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
                         skb->len, hdr_len);
+       /*
+        * Force memory writes to complete before letting h/w
+        * know there are new descriptors to fetch.  (Only
+        * applicable for weak-ordered memory model archs,
+        * such as IA-64).
+        */
+       wmb();
+
+       writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
 
-       ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+       ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        return NETDEV_TX_OK;
 }
@@ -3211,9 +2872,7 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
                ixgbevf_free_all_rx_resources(adapter);
        }
 
-#ifdef CONFIG_PM
        pci_save_state(pdev);
-#endif
 
        pci_disable_device(pdev);
 }
@@ -3256,19 +2915,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
        return stats;
 }
 
-static int ixgbevf_set_features(struct net_device *netdev,
-       netdev_features_t features)
-{
-       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
-       if (features & NETIF_F_RXCSUM)
-               adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-       else
-               adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
-
-       return 0;
-}
-
 static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open               = ixgbevf_open,
        .ndo_stop               = ixgbevf_close,
@@ -3281,7 +2927,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_tx_timeout         = ixgbevf_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
-       .ndo_set_features       = ixgbevf_set_features,
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3341,12 +2986,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-#ifdef HAVE_TX_MQ
        netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
                                   MAX_TX_QUEUES);
-#else
-       netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
-#endif
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
@@ -3387,10 +3028,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
        memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
               sizeof(struct ixgbe_mbx_operations));
 
-       adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
-       adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
-       adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
-
        /* setup the private structure */
        err = ixgbevf_sw_init(adapter);
        if (err)
@@ -3449,8 +3086,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
        if (err)
                goto err_register;
 
-       adapter->netdev_registered = true;
-
        netif_carrier_off(netdev);
 
        ixgbevf_init_last_counter_stats(adapter);
@@ -3460,8 +3095,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 
        hw_dbg(hw, "MAC: %d\n", hw->mac.type);
 
-       hw_dbg(hw, "LRO is disabled\n");
-
        hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
        cards_found++;
        return 0;
@@ -3501,10 +3134,8 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
        cancel_work_sync(&adapter->reset_task);
        cancel_work_sync(&adapter->watchdog_task);
 
-       if (adapter->netdev_registered) {
+       if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
-               adapter->netdev_registered = false;
-       }
 
        ixgbevf_reset_interrupt_capability(adapter);