/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "4.1.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2015 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf]	= &ixgbevf_82599_vf_info,
	[board_82599_vf_hv]	= &ixgbevf_82599_vf_hv_info,
	[board_X540_vf]		= &ixgbevf_X540_vf_info,
	[board_X540_vf_hv]	= &ixgbevf_X540_vf_hv_info,
	[board_X550_vf]		= &ixgbevf_X550_vf_info,
	[board_X550_vf_hv]	= &ixgbevf_X550_vf_hv_info,
	[board_X550EM_x_vf]	= &ixgbevf_X550EM_x_vf_info,
	[board_X550EM_x_vf_hv]	= &ixgbevf_X550EM_x_vf_hv_info,
	[board_x550em_a_vf]	= &ixgbevf_x550em_a_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct workqueue_struct *ixgbevf_wq;
static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
		queue_work(ixgbevf_wq, &adapter->service_task);
}
static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
}
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		ixgbevf_service_event_schedule(adapter);
}
static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* Tx or Rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
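
/* Illustrative note on the IVAR layout programmed above (worked example,
 * not additional register programming): each VTIVAR register carries two
 * queues, with the Rx cause in the low byte and the Tx cause in the high
 * byte of each 16-bit half.  For Rx (direction 0) on queue 3 mapped to
 * MSI-X vector 2:
 *   index    = 16 * (3 & 1) + 8 * 0 = 16  (i.e. byte 2 of the register)
 *   register = IXGBE_VTIVAR(3 >> 1) = IXGBE_VTIVAR(1)
 * so byte 2 of VTIVAR(1) is written with (2 | IXGBE_IVAR_ALLOC_VAL).
 */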
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(tx_ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
{
	return ring->stats.packets;
}

static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
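
/* Worked example of the pending-count arithmetic above (illustrative only):
 * with a 512-descriptor ring, head == 500 and tail == 10 means the ring has
 * wrapped, so pending = 10 + 512 - 500 = 22; with head == 10 and tail == 500
 * (no wrap) pending = 500 - 10 = 490; head == tail reports 0.
 */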
static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
{
	u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
					&tx_ring->state);
	}
	/* reset the countdown */
	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);

	/* update completed stats and continue */
	tx_ring->tx_stats.tx_done_old = tx_done;

	return false;
}
static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		ixgbevf_service_event_schedule(adapter);
	}
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_tx_timeout_reset(adapter);
}
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring, int napi_budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
		struct ixgbe_hw *hw = &adapter->hw;
		union ixgbe_adv_tx_desc *eop_desc;

		eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;

		pr_err("Detected Tx Unit Hang\n"
		       "  Tx Queue             <%d>\n"
		       "  TDH, TDT             <%x>, <%x>\n"
		       "  next_to_use          <%x>\n"
		       "  next_to_clean        <%x>\n"
		       "tx_buffer_info[next_to_clean]\n"
		       "  next_to_watch        <%p>\n"
		       "  eop_desc->wb.status  <%x>\n"
		       "  time_stamp           <%lx>\n"
		       "  jiffies              <%lx>\n",
		       tx_ring->queue_index,
		       IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
		       IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
		       tx_ring->next_to_use, i,
		       eop_desc, (eop_desc ? eop_desc->wb.status : 0),
		       tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbevf_tx_timeout_reset(adapter);

		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}
#define IXGBE_RSS_L4_TYPES_MASK \
	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
				   union ixgbe_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
		   IXGBE_RXDADV_RSSTYPE_MASK;

	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
	    ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
		return;

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
				       union ixgbe_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
		u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
		unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

		if (test_bit(vid & VLAN_VID_MASK, active_vlans))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
 * ixgbevf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
			       union ixgbe_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));

	if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
		return false;

	return true;
}
static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
				      struct ixgbevf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma = bi->dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0,
			   PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_rx_buff_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	/* nothing to do or no valid netdev defined */
	if (!cleaned_count || !rx_ring->netdev)
		return;

	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if pkt_addr didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		ixgbevf_write_tail(rx_ring, i);
	}
}
/**
 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Check for corrupted packet headers caused by senders on the local L2
 * embedded NIC switch not setting up their Tx Descriptors right.  These
 * should be very rare.
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* verify that the packet does not have any known errors */
	if (unlikely(ixgbevf_test_staterr(rx_desc,
					  IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
				  struct ixgbevf_rx_buffer *old_buff)
{
	struct ixgbevf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->page = old_buff->page;
	new_buff->dma = old_buff->dma;
	new_buff->page_offset = old_buff->page_offset;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
					 new_buff->page_offset,
					 IXGBEVF_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}
static inline bool ixgbevf_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
/**
 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the adapter.
 **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
				struct ixgbevf_rx_buffer *rx_buffer,
				union ixgbe_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = IXGBEVF_RX_BUFSZ;
#else
	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as is */
		if (likely(!ixgbevf_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		put_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	/* avoid re-using remote pages */
	if (unlikely(ixgbevf_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
		return false;
#endif
	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	page_ref_inc(page);

	return true;
}
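
/* Illustrative note on the half-page flip above: assuming the usual
 * PAGE_SIZE < 8192 configuration where IXGBEVF_RX_BUFSZ is 2048, the XOR
 * toggles page_offset between 0 and 2048, so the two halves of one page
 * are handed to hardware alternately while the other half may still be
 * referenced by an in-flight skb.
 */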
static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
					       union ixgbe_adv_rx_desc *rx_desc,
					       struct sk_buff *skb)
{
	struct ixgbevf_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						IXGBEVF_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      IXGBEVF_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of buffer_info */
	rx_buffer->dma = 0;
	rx_buffer->page = NULL;

	return skb;
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STAT_DD bit is set
		 */
		rmb();

		/* retrieve a buffer from the ring */
		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (ixgbevf_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		/* populate checksum, VLAN, and protocol */
		ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

		ixgbevf_rx_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget, work_done = 0;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx) {
		if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
			clean_complete = false;
	}

	if (budget <= 0)
		return budget;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
						   per_ring_budget);
		work_done += cleaned;
		if (cleaned >= per_ring_budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete_done(napi, work_done);
	if (adapter->rx_itr_setting == 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  BIT(q_vector->v_idx));

	return 0;
}
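
/* Example of the budget split above (illustrative only): with a NAPI budget
 * of 64 and three Rx rings on one vector, per_ring_budget = max(64 / 3, 1)
 * = 21, so each ring may clean up to 21 packets before the vector reports
 * that more work remains and polling continues.
 */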
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/* set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;

		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* Tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_12K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* Rx or Rx/Tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= BIT(v_idx);

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = BIT(v_idx);
	adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttle rate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (12000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
		new_itr = IXGBE_12K_ITR;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
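
/* Worked example of the smoothing above (illustrative only, assuming the
 * usual definitions IXGBE_20K_ITR == 200 and IXGBE_12K_ITR == 336): moving
 * from a current ITR of 200 toward a target of 336 gives
 *   new_itr = (10 * 336 * 200) / ((9 * 336) + 200) = 672000 / 3224 ~= 208
 * so the programmed interval creeps toward the new target instead of
 * jumping, which damps oscillation between latency ranges.
 */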
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	ixgbevf_service_event_schedule(adapter);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		return 0;
	}

	/* If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

	return 0;
}
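
/* Example of the constrained-vector grouping above (illustrative only):
 * with 4 Rx queues and 3 queue vectors, rqpv = DIV_ROUND_UP(4, 3) = 2 for
 * vector 0 (queues 0-1), then DIV_ROUND_UP(2, 2) = 1 for vector 1 (queue 2)
 * and DIV_ROUND_UP(1, 1) = 1 for vector 2 (queue 3), so the remainder is
 * spread across vectors without leaving any queue unmapped.
 */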
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	unsigned int ri = 0, ti = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-TxRx-%u", netdev->name, ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-rx-%u", netdev->name, ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "%s-tx-%u", netdev->name, ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
		       err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];

		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);	/* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
		   32;		/* PTHRESH = 32 */

	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	}  while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
	srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= BIT(29);

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the Rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}
static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}
/**
 * ixgbevf_init_rss_key - Initialize adapter RSS key
 * @adapter: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
{
	u32 *rss_key;

	if (!adapter->rss_key) {
		rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
		if (unlikely(!rss_key))
			return -ENOMEM;

		netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
		adapter->rss_key = rss_key;
	}

	return 0;
}
*adapter
)
1688 struct ixgbe_hw
*hw
= &adapter
->hw
;
1689 u32 vfmrqc
= 0, vfreta
= 0;
1690 u16 rss_i
= adapter
->num_rx_queues
;
1693 /* Fill out hash function seeds */
1694 for (i
= 0; i
< IXGBEVF_VFRSSRK_REGS
; i
++)
1695 IXGBE_WRITE_REG(hw
, IXGBE_VFRSSRK(i
), *(adapter
->rss_key
+ i
));
1697 for (i
= 0, j
= 0; i
< IXGBEVF_X550_VFRETA_SIZE
; i
++, j
++) {
1701 adapter
->rss_indir_tbl
[i
] = j
;
1703 vfreta
|= j
<< (i
& 0x3) * 8;
1705 IXGBE_WRITE_REG(hw
, IXGBE_VFRETA(i
>> 2), vfreta
);
1710 /* Perform hash on these packet types */
1711 vfmrqc
|= IXGBE_VFMRQC_RSS_FIELD_IPV4
|
1712 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP
|
1713 IXGBE_VFMRQC_RSS_FIELD_IPV6
|
1714 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP
;
1716 vfmrqc
|= IXGBE_VFMRQC_RSSEN
;
1718 IXGBE_WRITE_REG(hw
, IXGBE_VFMRQC
, vfmrqc
);
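
/* Illustrative note on the VFRETA packing above: each 32-bit VFRETA
 * register holds four one-byte redirection entries, so entry i lands at
 * bit offset (i & 0x3) * 8 and the accumulated word is flushed once every
 * fourth iteration at register index i >> 2.  With two Rx queues the table
 * simply alternates 0,1,0,1,... across the IXGBEVF_X550_VFRETA_SIZE entries.
 */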
static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

#ifndef CONFIG_SPARC
	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);
#else
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN |
			IXGBE_DCA_RXCTRL_DATA_WRO_EN);
#endif

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->next_to_alloc = 0;

	ixgbevf_configure_srrctl(adapter, reg_idx);

	/* allow any size packet since we can handle overflow */
	rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;

	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int i, ret;

	ixgbevf_setup_psrtype(adapter);
	if (hw->mac.type >= ixgbe_mac_X550_vf)
		ixgbevf_setup_vfmrqc(adapter);

	spin_lock_bh(&adapter->mbx_lock);
	/* notify the PF of our intent to use this size of frame */
	ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
	spin_unlock_bh(&adapter->mbx_lock);
	if (ret)
		dev_err(&adapter->pdev->dev,
			"Failed to set MTU at %d\n", netdev->mtu);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/* If the list is empty then send message to PF driver to
		 * clear all MAC VLANs on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int flags = netdev->flags;
	int xcast_mode;

	xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
		     (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
		     IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;

	/* request the most inclusive mode we need */
	if (flags & IFF_PROMISC)
		xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
	else if (flags & IFF_ALLMULTI)
		xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
	else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
	else
		xcast_mode = IXGBEVF_XCAST_MODE_NONE;

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.update_xcast_mode(hw, xcast_mode);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}
static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = adapter->num_rx_queues;
	unsigned int num_tx_queues = adapter->num_tx_queues;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* we need only one Tx queue */
		num_tx_queues = 1;

		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if ((adapter->num_rx_queues != num_rx_queues) ||
	    (adapter->num_tx_queues != num_tx_queues)) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
	}

	return 0;
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
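
/* Note on the reads above: the VF octet counters (VFGORC/VFGOTC) are wider
 * than 32 bits and are exposed as an LSB register plus an MSB register, which
 * is why the two reads are combined with a shift by 32, while the packet
 * counters fit in a single 32-bit register.  The "base" copies taken here are
 * what later lets ixgbevf_update_stats()/ixgbevf_save_reset_stats() report
 * deltas across a reset.
 */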
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_13,
		      ixgbe_mbox_api_12,
		      ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}
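
/* The api[] table above is ordered newest-first, so the loop settles on the
 * highest mailbox API version that both the VF and the PF understand and
 * stops at the first successful negotiation.
 */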
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	ixgbevf_irq_enable(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->service_timer, jiffies);
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	unsigned long size;
	unsigned int i;

	/* Free Rx ring sk_buff */
	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring pages */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer;

		rx_buffer = &rx_ring->rx_buffer_info[i];
		if (rx_buffer->dma)
			dma_unmap_page(dev, rx_buffer->dma,
				       PAGE_SIZE, DMA_FROM_DEVICE);
		rx_buffer->dma = 0;
		if (rx_buffer->page)
			__free_page(rx_buffer->page);
		rx_buffer->page = NULL;
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable all enabled Rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	usleep_range(10000, 20000);

	netif_tx_stop_all_queues(netdev);

	/* call carrier off first to avoid false dev_watchdog timeouts */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->service_timer);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->last_reset = jiffies;
}
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return vectors;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = vectors;

	return 0;
}
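
/* MIN_MSIX_COUNT reflects the two-vector minimum described above: one vector
 * servicing TxQ[0]/RxQ[0] and one "other" vector for link/mailbox events.
 * pci_enable_msix_range() returns an error if it cannot hand out at least
 * that many vectors, in which case the MSI-X table is freed above.
 */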
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1) {
		adapter->num_rx_queues = num_tcs;
	} else {
		u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);

		switch (hw->api_version) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
			adapter->num_rx_queues = rss;
			adapter->num_tx_queues = rss;
			break;
		default:
			break;
		}
	}
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ring *ring;
	int rx = 0, tx = 0;

	for (; tx < adapter->num_tx_queues; tx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = tx;
		ring->reg_idx = tx;

		adapter->tx_ring[tx] = ring;
	}

	for (; rx < adapter->num_rx_queues; rx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		ring->count = adapter->rx_ring_count;
		ring->queue_index = rx;
		ring->reg_idx = rx;

		adapter->rx_ring[rx] = ring;
	}

	return 0;

err_allocation:
	while (tx) {
		kfree(adapter->tx_ring[--tx]);
		adapter->tx_ring[tx] = NULL;
	}

	while (rx) {
		kfree(adapter->rx_ring[--rx]);
		adapter->rx_ring[rx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int vector, v_budget;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter.
	 */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		return err;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
}
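
/* Example of the vector budget above: with two Tx and two Rx queues on a host
 * that has at least two online CPUs, v_budget works out to
 * max(2, 2) + NON_Q_VECTORS, i.e. two queue vectors plus the non-queue
 * (mailbox/other) vector(s).
 */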
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
	       adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	if (hw->mac.type >= ixgbe_mac_X550_vf) {
		err = ixgbevf_init_rss_key(adapter);
		if (err)
			goto out;
	}

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state.  Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		ether_addr_copy(netdev->dev_addr, hw->mac.addr);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		ether_addr_copy(hw->mac.addr, netdev->dev_addr);
		ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
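
/* Rollover example for the 32-bit macro above: if the hardware register
 * wrapped from 0xFFFFFFF0 to 0x00000010 between two polls, current_counter
 * (0x10) is less than last_counter (0xFFFFFFF0), so the 64-bit software
 * counter is first bumped by 2^32 and then has its low 32 bits replaced with
 * the new register value.  The 36-bit variant does the same with a 2^36 step.
 */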
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->rx_ring[i]->hw_csum_rx_error = 0;
	}
}
/**
 * ixgbevf_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_service_timer(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);

	ixgbevf_service_event_schedule(adapter);
}
static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
	if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
		return;

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes.  First it strobes the interrupt lines
 * in order to make certain interrupts are occurring.  Secondly it sets the
 * bits needed to check for TX hangs.  As a result we should immediately
 * determine if a hang has occurred.
 **/
static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/* If we're down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
	}

	/* get one bit for every active Tx/Rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];

		if (qv->rx.ring || qv->tx.ring)
			eics |= BIT(i);
	}

	/* Cause software interrupt to ensure rings are cleaned */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
}
/**
 * ixgbevf_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 err;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	/* if check for link returns error we will need to reset */
	if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
		set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
		link_up = false;
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}
/**
 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
 *				 print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
		 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
		 "10 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
		 "1 Gbps" :
		 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
		 "100 Mbps" :
		 "unknown speed");

	netif_carrier_on(netdev);
}
/**
 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
 *				   print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	dev_info(&adapter->pdev->dev, "NIC Link is Down\n");

	netif_carrier_off(netdev);
}
/**
 * ixgbevf_watchdog_subtask - worker thread to bring link up
 * @adapter: board private structure
 **/
static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
{
	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	ixgbevf_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbevf_watchdog_link_is_up(adapter);
	else
		ixgbevf_watchdog_link_is_down(adapter);

	ixgbevf_update_stats(adapter);
}
/**
 * ixgbevf_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_service_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       service_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}

	ixgbevf_queue_reset_subtask(adapter);
	ixgbevf_reset_subtask(adapter);
	ixgbevf_watchdog_subtask(adapter);
	ixgbevf_check_hang_subtask(adapter);

	ixgbevf_service_event_complete(adapter);
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero.  The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start.
		 */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/* Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_up_complete(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:
	return err;
}
/**
 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
 * @adapter: the private adapter struct
 *
 * This function should contain the necessary work common to both suspending
 * and closing of the device.
 **/
static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
{
	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_device_present(netdev))
		ixgbevf_close_suspend(adapter);

	return 0;
}
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
				&adapter->state))
		return;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	rtnl_lock();

	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);

	rtnl_unlock();
}
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
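
/* A context descriptor written above occupies a slot in the same ring as the
 * data descriptors, which is why next_to_use is advanced (with wrap) before
 * the descriptor fields are filled in.
 */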
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (eth_p_mpls(first->protocol))
		ip.hdr = skb_inner_network_header(skb);
	else
		ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}
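
/* mss_l4len_idx built above packs three fields into one 32-bit word: the TCP
 * header length at IXGBE_ADVTXD_L4LEN_SHIFT, the MSS at
 * IXGBE_ADVTXD_MSS_SHIFT, and context index 1 at IXGBE_ADVTXD_IDX_SHIFT,
 * matching the "use 1 as index for TSO" comment.
 */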
static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (((first->protocol == htons(ETH_P_IP)) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    ((first->protocol == htons(ETH_P_IPV6)) &&
		     ixgbevf_ipv6_csum_is_sctp(skb))) {
			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		/* fall through */
	default:
		skb_checksum_help(skb);
		goto no_csum;
	}
	/* update TX checksum flag */
	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
}
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW VLAN bit if VLAN is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}
static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}
static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbevf_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
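
/* Each data descriptor can carry at most IXGBE_MAX_DATA_PER_TXD bytes, so the
 * mapping loop above splits any buffer larger than that limit across several
 * descriptors, advancing the DMA address and the remaining size by the limit
 * on each pass of the inner while loop.
 */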
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}
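
/* The "count + 3" passed to ixgbevf_maybe_stop_tx() above mirrors the budget
 * spelled out in the comment in this function: the extra descriptors cover
 * the context descriptor plus the two-descriptor gap kept between tail and
 * head.
 */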
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	spin_lock_bh(&adapter->mbx_lock);

	err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return -EPERM;

	ether_addr_copy(hw->mac.addr, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int ret;

	spin_lock_bh(&adapter->mbx_lock);
	/* notify the PF of our intent to use this size of frame */
	ret = hw->mac.ops.set_rlpml(hw, max_frame);
	spin_unlock_bh(&adapter->mbx_lock);
	if (ret)
		return -EINVAL;

	hw_dbg(hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);

	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	return 0;
}
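
/* Example for the max_frame computation above: a standard 1500 byte MTU is
 * reported to the PF as 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes
 * on the wire.
 */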
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void ixgbevf_netpoll(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return;
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbevf_close_suspend(adapter);

	ixgbevf_clear_interrupt_scheme(adapter);
	rtnl_unlock();

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}

	adapter->hw.hw_addr = adapter->io_addr;
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}
static void ixgbevf_get_stats(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}
}
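
/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loops above
 * re-read a ring's byte and packet counters if the per-ring syncp sequence
 * changed during the read, so the 64-bit values stay consistent on 32-bit
 * hosts without taking a lock in the hot path.
 */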
#define IXGBEVF_MAX_MAC_HDR_LEN		127
#define IXGBEVF_MAX_NETWORK_HDR_LEN	511

static netdev_features_t
ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}
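
/* The 127/511 byte limits above track what a single Tx context descriptor can
 * describe (per the comment in ixgbevf_features_check()); frames with
 * oversized MAC or network headers simply lose the checksum/TSO offload bits
 * so the stack handles them in software.
 */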
static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbevf_netpoll,
#endif
	.ndo_features_check	= ixgbevf_features_check,
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	bool disable_dev = false;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	/* Setup HW API */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM |
			      NETIF_F_HW_CSUM |
			      NETIF_F_SCTP_CRC;

#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				      NETIF_F_GSO_GRE_CSUM | \
				      NETIF_F_GSO_IPXIP4 | \
				      NETIF_F_GSO_IPXIP6 | \
				      NETIF_F_GSO_UDP_TUNNEL | \
				      NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
	netdev->hw_features |= NETIF_F_GSO_PARTIAL |
			       IXGBEVF_GSO_PARTIAL_FEATURES;

	netdev->features = netdev->hw_features;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_SG |
				 NETIF_F_TSO |
				 NETIF_F_TSO6 |
				 NETIF_F_HW_CSUM;
	netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
	netdev->hw_enc_features |= netdev->vlan_features;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 1504 or 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
	case ixgbe_mbox_api_12:
	case ixgbe_mbox_api_13:
		netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
				  (ETH_HLEN + ETH_FCS_LEN);
		break;
	default:
		if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
			netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
					  (ETH_HLEN + ETH_FCS_LEN);
		else
			netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
		break;
	}

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}

	setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
		    (unsigned long)adapter);

	INIT_WORK(&adapter->service_task, ixgbevf_service_task);
	set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, netdev);
	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the VF info */
	dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
	dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);

	switch (hw->mac.type) {
	case ixgbe_mac_X550_vf:
		dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
		break;
	case ixgbe_mac_X540_vf:
		dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
		break;
	case ixgbe_mac_82599_vf:
	default:
		dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
		break;
	}

	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
	kfree(adapter->rss_key);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter;
	bool disable_dev;

	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->rss_key);
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_close_suspend(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot. Implementation
 * resembles the first half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	adapter->hw.hw_addr = adapter->io_addr;
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();
	if (netif_running(netdev))
		ixgbevf_open(netdev);

	netif_device_attach(netdev);
	rtnl_unlock();
}
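
/*
 * The PCI core drives Advanced Error Recovery through the callbacks below:
 * .error_detected runs first when a bus error is reported, .slot_reset runs
 * after the slot/link has been reset, and .resume runs once the core decides
 * traffic may flow again.
 */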
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe = ixgbevf_probe,
	.remove = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = ixgbevf_suspend,
	.resume = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);
	ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
	if (!ixgbevf_wq) {
		pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
		return -ENOMEM;
	}

	return pci_register_driver(&ixgbevf_driver);
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
	if (ixgbevf_wq) {
		destroy_workqueue(ixgbevf_wq);
		ixgbevf_wq = NULL;
	}
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */