/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.11.3-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	rx_ring->next_to_use = val;

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

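/*
 * Illustrative worked example (not part of the driver): for an Rx cause on
 * queue 3 mapped to MSI-X vector 1, direction = 0 and queue = 3, so
 * index = 16 * (3 & 1) + 8 * 0 = 16 and the register is VTIVAR(3 >> 1) =
 * VTIVAR(1).  The byte at bit offset 16 of that register is overwritten with
 * (1 | IXGBE_IVAR_ALLOC_VAL); each VTIVAR register packs the Rx and Tx causes
 * of two queues into its four byte-wide fields.
 */
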
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

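/*
 * Illustrative example (not part of the driver): IXGBE_MAX_DATA_PER_TXD is
 * 1 << 14 = 16384 bytes, so a single 60000-byte fragment needs
 * TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 16384) = 4 descriptors.
 * DESC_NEEDED reserves MAX_SKB_FRAGS + 4 slots as the worst case considered
 * before a queue is stopped.
 */
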
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	tx_buffer_info = &tx_ring->tx_buffer_info[i];
	eop_desc = tx_buffer_info->next_to_watch;

	do {
		bool cleaned = false;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer_info->next_to_watch = NULL;

		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			cleaned = (tx_desc == eop_desc);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
		}

		eop_desc = tx_buffer_info->next_to_watch;
	} while (count < tx_ring->count);

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}

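/*
 * Illustrative example (not part of the driver): for a TSO skb with
 * gso_segs = 10, skb_headlen() = 66 and skb->len = 14546, the accounting
 * above reports total_packets += 10 and total_bytes +=
 * (10 - 1) * 66 + 14546 = 15140, i.e. the on-wire byte count including the
 * headers replicated into every segment.
 */
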
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb, u8 status,
			   union ixgbe_adv_rx_desc *rx_desc)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		ring->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		ring->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	ring->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;

			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(&pdev->dev, "RX DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i)
		ixgbevf_release_rx_desc(rx_ring, i);
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
		    ether_addr_equal(adapter->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = ixgbevf_desc_unused(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function cleans all of the rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

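/*
 * Illustrative example (not part of the driver): with the usual NAPI budget
 * of 64 and three Rx rings on this q_vector, per_ring_budget =
 * max(64 / 3, 1) = 21, so each ring may clean at most 21 packets per poll and
 * the vector is only re-armed once every ring cleaned less than its share.
 */
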
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
			container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->bp_cleaned += found;
		else
			ring->bp_misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

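/*
 * Illustrative example (not part of the driver): with two queue vectors
 * (v_idx 0 and 1) the loop above sets eims_enable_mask = 0x3, the mailbox/
 * link vector then lands on v_idx 2, so eims_other = 1 << 2 = 0x4 and the
 * combined eims_enable_mask later written by ixgbevf_irq_enable() is 0x7.
 */
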
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

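/*
 * Illustrative example (not part of the driver): at the 20K ints/s setting
 * the timeslice is q_vector->itr >> 2 = 50 usecs.  Receiving 3000 bytes in
 * that window gives bytes_perint = 3000 / 50 = 60 > 20, so the ring is
 * promoted from low_latency to bulk_latency; 400 bytes would give 8 <= 10
 * and demote it to lowest_latency instead.
 */
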
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

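/*
 * Illustrative arithmetic (not part of the driver): if q_vector->itr is
 * currently 500 and the target class maps to 40, the smoothing above gives
 * new_itr = (10 * 40 * 500) / (9 * 40 + 500) = 200000 / 860 = 232, so the
 * interrupt rate converges over several passes instead of jumping straight
 * to the new value.
 */
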
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

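/*
 * Illustrative example (not part of the driver): with 2 queue vectors but
 * 4 Rx and 4 Tx queues there is no 1-to-1 mapping, so the fallback loops
 * give each vector rqpv = DIV_ROUND_UP(4, 2) = 2 Rx rings and tqpv = 2 Tx
 * rings; vector 0 ends up with rings 0-1 and vector 1 with rings 2-3.
 */
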
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		ring->tail = hw->hw_addr + IXGBE_VFTDT(j);
		ring->next_to_clean = 0;
		ring->next_to_use = 0;
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}

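/*
 * Illustrative example (not part of the driver, and assuming the usual
 * 1522-byte MAXIMUM_ETHERNET_VLAN_SIZE): with an MTU of 1500, max_frame =
 * 1500 + ETH_HLEN + ETH_FCS_LEN = 1518, plus VLAN_HLEN = 1522.  On an X540
 * VF that selects the 1522-byte buffer; on an 82599 VF the first branch is
 * skipped and IXGBEVF_RXBUFFER_2K is used instead.
 */
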
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	ixgbevf_setup_psrtype(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		rdba = ring->dma;
		j = ring->reg_idx;
		rdlen = ring->count * sizeof(union ixgbe_adv_rx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		ring->tail = hw->hw_addr + IXGBE_VFRDT(j);
		ring->next_to_clean = 0;
		ring->next_to_use = 0;

		ixgbevf_configure_srrctl(adapter, j);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}

1287static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1288{
1289 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1290 struct ixgbe_hw *hw = &adapter->hw;
1291 int count = 0;
1292
1293 if ((netdev_uc_count(netdev)) > 10) {
dbd9636e 1294 pr_err("Too many unicast filters - No Space\n");
46ec20ff
GR
1295 return -ENOSPC;
1296 }
1297
1298 if (!netdev_uc_empty(netdev)) {
1299 struct netdev_hw_addr *ha;
1300 netdev_for_each_uc_addr(ha, netdev) {
1301 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1302 udelay(200);
1303 }
1304 } else {
1305 /*
1306 * If the list is empty then send message to PF driver to
1307 * clear all macvlans on this VF.
1308 */
1309 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1310 }
1311
1312 return count;
1313}
1314
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0].reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if (adapter->num_rx_queues != num_rx_queues) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	}

	return 0;
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 ixgbevf_desc_unused(ring));
	}
}

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	int j = adapter->rx_ring[rxr].reg_idx;

	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
		       rxr);

	ixgbevf_release_rx_desc(&adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}

static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

1704void ixgbevf_down(struct ixgbevf_adapter *adapter)
1705{
1706 struct net_device *netdev = adapter->netdev;
1707 struct ixgbe_hw *hw = &adapter->hw;
1708 u32 txdctl;
1709 int i, j;
1710
1711 /* signal that we are down to the interrupt handler */
1712 set_bit(__IXGBEVF_DOWN, &adapter->state);
858c3dda
DS
1713
1714 /* disable all enabled rx queues */
1715 for (i = 0; i < adapter->num_rx_queues; i++)
1716 ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
92915f71
GR
1717
1718 netif_tx_disable(netdev);
1719
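	/* give any in-flight transmit work a moment to drain (roughly
	 * 10 ms) before the queues are fully stopped below */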
1720 msleep(10);
1721
1722 netif_tx_stop_all_queues(netdev);
1723
1724 ixgbevf_irq_disable(adapter);
1725
1726 ixgbevf_napi_disable_all(adapter);
1727
1728 del_timer_sync(&adapter->watchdog_timer);
1729 /* can't call flush scheduled work here because it can deadlock
1730 * if linkwatch_event tries to acquire the rtnl_lock which we are
1731 * holding */
1732 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1733 msleep(1);
1734
1735 /* disable transmits in the hardware now that interrupts are off */
1736 for (i = 0; i < adapter->num_tx_queues; i++) {
1737 j = adapter->tx_ring[i].reg_idx;
1738 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1739 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1740 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1741 }
1742
1743 netif_carrier_off(netdev);
1744
1745 if (!pci_channel_offline(adapter->pdev))
1746 ixgbevf_reset(adapter);
1747
1748 ixgbevf_clean_all_tx_rings(adapter);
1749 ixgbevf_clean_all_rx_rings(adapter);
1750}
1751
1752void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1753{
1754 WARN_ON(in_interrupt());
c0456c23 1755
92915f71
GR
1756 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1757 msleep(1);
1758
4b2cd27f
AD
1759 ixgbevf_down(adapter);
1760 ixgbevf_up(adapter);
92915f71
GR
1761
1762 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1763}
1764
1765void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1766{
1767 struct ixgbe_hw *hw = &adapter->hw;
1768 struct net_device *netdev = adapter->netdev;
1769
798e381a 1770 if (hw->mac.ops.reset_hw(hw)) {
92915f71 1771 hw_dbg(hw, "PF still resetting\n");
798e381a 1772 } else {
92915f71 1773 hw->mac.ops.init_hw(hw);
798e381a
DS
1774 ixgbevf_negotiate_api(adapter);
1775 }
92915f71
GR
1776
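	/* refresh the netdev with whatever address the PF assigned (or
	 * re-assigned) to this VF during the reset handshake */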
1777 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1778 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1779 netdev->addr_len);
1780 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1781 netdev->addr_len);
1782 }
1783}
1784
e45dd5fe
JK
1785static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1786 int vectors)
92915f71 1787{
a5f9337b
ET
1788 int err = 0;
1789 int vector_threshold;
92915f71 1790
fa71ae27
AD
1791 /* We'll want at least 2 (vector_threshold):
1792 * 1) TxQ[0] + RxQ[0] handler
1793 * 2) Other (Link Status Change, etc.)
92915f71
GR
1794 */
1795 vector_threshold = MIN_MSIX_COUNT;
1796
1797 /* The more we get, the more we will assign to Tx/Rx Cleanup
1798 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1799 * Right now, we simply care about how many we'll get; we'll
 1800	 * set them up later while requesting IRQs.
1801 */
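	/* pci_enable_msix() returns 0 on success, a negative errno on a
	 * hard failure, or a positive count of vectors that could be
	 * allocated; in the last case we retry with that smaller count. */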
1802 while (vectors >= vector_threshold) {
1803 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1804 vectors);
e45dd5fe 1805 if (!err || err < 0) /* Success or a nasty failure. */
92915f71 1806 break;
92915f71
GR
1807 else /* err == number of vectors we should try again with */
1808 vectors = err;
1809 }
1810
e45dd5fe
JK
1811 if (vectors < vector_threshold)
1812 err = -ENOMEM;
1813
1814 if (err) {
1815 dev_err(&adapter->pdev->dev,
1816 "Unable to allocate MSI-X interrupts\n");
92915f71
GR
1817 kfree(adapter->msix_entries);
1818 adapter->msix_entries = NULL;
1819 } else {
1820 /*
1821 * Adjust for only the vectors we'll use, which is minimum
1822 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1823 * vectors we were allocated.
1824 */
1825 adapter->num_msix_vectors = vectors;
1826 }
dee847f5 1827
e45dd5fe 1828 return err;
92915f71
GR
1829}
1830
49ce9c2c
BH
1831/**
1832 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
92915f71
GR
1833 * @adapter: board private structure to initialize
1834 *
1835 * This is the top level queue allocation routine. The order here is very
 1836 * important, starting with the largest set of features turned on at once,
1837 * and ending with the smallest set of features. This way large combinations
1838 * can be allocated if they're turned on, and smaller combinations are the
1839 * fallthrough conditions.
1840 *
1841 **/
1842static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1843{
220fe050
DS
1844 struct ixgbe_hw *hw = &adapter->hw;
1845 unsigned int def_q = 0;
1846 unsigned int num_tcs = 0;
1847 int err;
1848
92915f71
GR
1849 /* Start with base case */
1850 adapter->num_rx_queues = 1;
1851 adapter->num_tx_queues = 1;
220fe050
DS
1852
1853 spin_lock_bh(&adapter->mbx_lock);
1854
1855 /* fetch queue configuration from the PF */
1856 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1857
1858 spin_unlock_bh(&adapter->mbx_lock);
1859
1860 if (err)
1861 return;
1862
1863 /* we need as many queues as traffic classes */
1864 if (num_tcs > 1)
1865 adapter->num_rx_queues = num_tcs;
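	/* Tx stays at a single queue here; only the Rx side is widened
	 * to one queue per traffic class reported by the PF. */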
92915f71
GR
1866}
1867
1868/**
1869 * ixgbevf_alloc_queues - Allocate memory for all rings
1870 * @adapter: board private structure to initialize
1871 *
1872 * We allocate one ring per queue at run-time since we don't know the
 1873 * number of queues at compile-time. Each ring records its own
 1874 * descriptor count, register index and device/netdev back-pointers.
1875 **/
1876static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1877{
1878 int i;
1879
1880 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1881 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1882 if (!adapter->tx_ring)
1883 goto err_tx_ring_allocation;
1884
1885 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1886 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1887 if (!adapter->rx_ring)
1888 goto err_rx_ring_allocation;
1889
1890 for (i = 0; i < adapter->num_tx_queues; i++) {
1891 adapter->tx_ring[i].count = adapter->tx_ring_count;
1892 adapter->tx_ring[i].queue_index = i;
56e94095 1893 /* reg_idx may be remapped later by DCB config */
92915f71 1894 adapter->tx_ring[i].reg_idx = i;
fb40195c
AD
1895 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1896 adapter->tx_ring[i].netdev = adapter->netdev;
92915f71
GR
1897 }
1898
1899 for (i = 0; i < adapter->num_rx_queues; i++) {
1900 adapter->rx_ring[i].count = adapter->rx_ring_count;
1901 adapter->rx_ring[i].queue_index = i;
1902 adapter->rx_ring[i].reg_idx = i;
fb40195c
AD
1903 adapter->rx_ring[i].dev = &adapter->pdev->dev;
1904 adapter->rx_ring[i].netdev = adapter->netdev;
92915f71
GR
1905 }
1906
1907 return 0;
1908
1909err_rx_ring_allocation:
1910 kfree(adapter->tx_ring);
1911err_tx_ring_allocation:
1912 return -ENOMEM;
1913}
1914
1915/**
1916 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1917 * @adapter: board private structure to initialize
1918 *
1919 * Attempt to configure the interrupts using the best available
1920 * capabilities of the hardware and the kernel.
1921 **/
1922static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1923{
91e2b89b 1924 struct net_device *netdev = adapter->netdev;
92915f71
GR
1925 int err = 0;
1926 int vector, v_budget;
1927
1928 /*
1929 * It's easy to be greedy for MSI-X vectors, but it really
1930 * doesn't do us much good if we have a lot more vectors
 1931	 * than CPUs. So let's be conservative and only ask for
fa71ae27
AD
 1932	 * (roughly) the same number of vectors as there are CPUs.
1933 * The default is to use pairs of vectors.
92915f71 1934 */
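	/* Illustration (hypothetical numbers): with 2 Rx queues, 1 Tx
	 * queue and 8 online CPUs, v_budget = max(2, 1) = 2, which stays
	 * under the CPU cap, and NON_Q_VECTORS is then added for the
	 * non-queue (mailbox/other) vector. */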
fa71ae27
AD
1935 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1936 v_budget = min_t(int, v_budget, num_online_cpus());
1937 v_budget += NON_Q_VECTORS;
92915f71
GR
1938
 1939	/* The VF driver has no legacy/MSI fallback, so failing to allocate
 1940	 * the MSI-X entry table leaves this adapter without usable interrupts. */
1941 adapter->msix_entries = kcalloc(v_budget,
1942 sizeof(struct msix_entry), GFP_KERNEL);
1943 if (!adapter->msix_entries) {
1944 err = -ENOMEM;
1945 goto out;
1946 }
1947
1948 for (vector = 0; vector < v_budget; vector++)
1949 adapter->msix_entries[vector].entry = vector;
1950
e45dd5fe
JK
1951 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1952 if (err)
1953 goto out;
92915f71 1954
91e2b89b
GR
1955 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1956 if (err)
1957 goto out;
1958
1959 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1960
92915f71
GR
1961out:
1962 return err;
1963}
1964
1965/**
1966 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1967 * @adapter: board private structure to initialize
1968 *
1969 * We allocate one q_vector per queue interrupt. If allocation fails we
1970 * return -ENOMEM.
1971 **/
1972static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1973{
1974 int q_idx, num_q_vectors;
1975 struct ixgbevf_q_vector *q_vector;
92915f71
GR
1976
1977 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
92915f71
GR
1978
1979 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1980 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1981 if (!q_vector)
1982 goto err_out;
1983 q_vector->adapter = adapter;
1984 q_vector->v_idx = q_idx;
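		/* register this vector's NAPI context; 64 is the usual
		 * NAPI weight (max packets processed per poll round) */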
fa71ae27
AD
1985 netif_napi_add(adapter->netdev, &q_vector->napi,
1986 ixgbevf_poll, 64);
c777cdfa
JK
1987#ifdef CONFIG_NET_RX_BUSY_POLL
1988 napi_hash_add(&q_vector->napi);
1989#endif
92915f71
GR
1990 adapter->q_vector[q_idx] = q_vector;
1991 }
1992
1993 return 0;
1994
1995err_out:
1996 while (q_idx) {
1997 q_idx--;
1998 q_vector = adapter->q_vector[q_idx];
c777cdfa
JK
1999#ifdef CONFIG_NET_RX_BUSY_POLL
2000 napi_hash_del(&q_vector->napi);
2001#endif
92915f71
GR
2002 netif_napi_del(&q_vector->napi);
2003 kfree(q_vector);
2004 adapter->q_vector[q_idx] = NULL;
2005 }
2006 return -ENOMEM;
2007}
2008
2009/**
2010 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2011 * @adapter: board private structure to initialize
2012 *
2013 * This function frees the memory allocated to the q_vectors. In addition if
2014 * NAPI is enabled it will delete any references to the NAPI struct prior
2015 * to freeing the q_vector.
2016 **/
2017static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2018{
f4477702 2019 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
92915f71
GR
2020
2021 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2022 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2023
2024 adapter->q_vector[q_idx] = NULL;
c777cdfa
JK
2025#ifdef CONFIG_NET_RX_BUSY_POLL
2026 napi_hash_del(&q_vector->napi);
2027#endif
f4477702 2028 netif_napi_del(&q_vector->napi);
92915f71
GR
2029 kfree(q_vector);
2030 }
2031}
2032
2033/**
2034 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2035 * @adapter: board private structure
2036 *
2037 **/
2038static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2039{
2040 pci_disable_msix(adapter->pdev);
2041 kfree(adapter->msix_entries);
2042 adapter->msix_entries = NULL;
92915f71
GR
2043}
2044
2045/**
2046 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2047 * @adapter: board private structure to initialize
2048 *
2049 **/
2050static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2051{
2052 int err;
2053
2054 /* Number of supported queues */
2055 ixgbevf_set_num_queues(adapter);
2056
2057 err = ixgbevf_set_interrupt_capability(adapter);
2058 if (err) {
2059 hw_dbg(&adapter->hw,
2060 "Unable to setup interrupt capabilities\n");
2061 goto err_set_interrupt;
2062 }
2063
2064 err = ixgbevf_alloc_q_vectors(adapter);
2065 if (err) {
2066 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2067 "vectors\n");
2068 goto err_alloc_q_vectors;
2069 }
2070
2071 err = ixgbevf_alloc_queues(adapter);
2072 if (err) {
dbd9636e 2073 pr_err("Unable to allocate memory for queues\n");
92915f71
GR
2074 goto err_alloc_queues;
2075 }
2076
2077 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2078 "Tx Queue count = %u\n",
2079 (adapter->num_rx_queues > 1) ? "Enabled" :
2080 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2081
2082 set_bit(__IXGBEVF_DOWN, &adapter->state);
2083
2084 return 0;
2085err_alloc_queues:
2086 ixgbevf_free_q_vectors(adapter);
2087err_alloc_q_vectors:
2088 ixgbevf_reset_interrupt_capability(adapter);
2089err_set_interrupt:
2090 return err;
2091}
2092
0ac1e8ce
AD
2093/**
2094 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2095 * @adapter: board private structure to clear interrupt scheme on
2096 *
2097 * We go through and clear interrupt specific resources and reset the structure
2098 * to pre-load conditions
2099 **/
2100static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2101{
2102 adapter->num_tx_queues = 0;
2103 adapter->num_rx_queues = 0;
2104
2105 ixgbevf_free_q_vectors(adapter);
2106 ixgbevf_reset_interrupt_capability(adapter);
2107}
2108
92915f71
GR
2109/**
2110 * ixgbevf_sw_init - Initialize general software structures
2111 * (struct ixgbevf_adapter)
2112 * @adapter: board private structure to initialize
2113 *
2114 * ixgbevf_sw_init initializes the Adapter private data structure.
2115 * Fields are initialized based on PCI device information and
2116 * OS network device settings (MTU size).
2117 **/
9f9a12f8 2118static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
92915f71
GR
2119{
2120 struct ixgbe_hw *hw = &adapter->hw;
2121 struct pci_dev *pdev = adapter->pdev;
e1941a74 2122 struct net_device *netdev = adapter->netdev;
92915f71
GR
2123 int err;
2124
2125 /* PCI config space info */
2126
2127 hw->vendor_id = pdev->vendor;
2128 hw->device_id = pdev->device;
ff938e43 2129 hw->revision_id = pdev->revision;
92915f71
GR
2130 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2131 hw->subsystem_device_id = pdev->subsystem_device;
2132
2133 hw->mbx.ops.init_params(hw);
56e94095
AD
2134
2135 /* assume legacy case in which PF would only give VF 2 queues */
2136 hw->mac.max_tx_queues = 2;
2137 hw->mac.max_rx_queues = 2;
2138
798e381a
DS
2139 /* lock to protect mailbox accesses */
2140 spin_lock_init(&adapter->mbx_lock);
2141
92915f71
GR
2142 err = hw->mac.ops.reset_hw(hw);
2143 if (err) {
2144 dev_info(&pdev->dev,
e1941a74 2145 "PF still in reset state. Is the PF interface up?\n");
92915f71
GR
2146 } else {
2147 err = hw->mac.ops.init_hw(hw);
2148 if (err) {
dbd9636e 2149			pr_err("init_hw failed: %d\n", err);
92915f71
GR
2150 goto out;
2151 }
798e381a 2152 ixgbevf_negotiate_api(adapter);
e1941a74
GR
2153 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2154 if (err)
2155 dev_info(&pdev->dev, "Error reading MAC address\n");
2156 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2157 dev_info(&pdev->dev,
2158 "MAC address not assigned by administrator.\n");
2159 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2160 }
2161
2162 if (!is_valid_ether_addr(netdev->dev_addr)) {
2163 dev_info(&pdev->dev, "Assigning random MAC address\n");
2164 eth_hw_addr_random(netdev);
2165 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
92915f71
GR
2166 }
2167
2168 /* Enable dynamic interrupt throttling rates */
5f3600eb
AD
2169 adapter->rx_itr_setting = 1;
2170 adapter->tx_itr_setting = 1;
92915f71 2171
92915f71
GR
2172 /* set default ring sizes */
2173 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2174 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2175
92915f71 2176 set_bit(__IXGBEVF_DOWN, &adapter->state);
1a0d6ae5 2177 return 0;
92915f71
GR
2178
2179out:
2180 return err;
2181}
2182
92915f71
GR
2183#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2184 { \
2185 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2186 if (current_counter < last_counter) \
2187 counter += 0x100000000LL; \
2188 last_counter = current_counter; \
2189 counter &= 0xFFFFFFFF00000000LL; \
2190 counter |= current_counter; \
2191 }
2192
2193#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2194 { \
2195 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2196 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2197 u64 current_counter = (current_counter_msb << 32) | \
2198 current_counter_lsb; \
2199 if (current_counter < last_counter) \
2200 counter += 0x1000000000LL; \
2201 last_counter = current_counter; \
2202 counter &= 0xFFFFFFF000000000LL; \
2203 counter |= current_counter; \
2204 }
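/*
 * Worked example for the 32-bit case: if last_counter was 0xFFFFFFF0 and
 * the register now reads 0x00000010, current < last, so 0x100000000 is
 * added to the 64-bit software total and its low 32 bits are replaced
 * with the new reading.  The 36-bit variant does the same with a
 * 0x1000000000 wrap, built from the LSB/MSB register pair.
 */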
2205/**
2206 * ixgbevf_update_stats - Update the board statistics counters.
2207 * @adapter: board private structure
2208 **/
2209void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2210{
2211 struct ixgbe_hw *hw = &adapter->hw;
55fb277c 2212 int i;
92915f71 2213
088245a3
GR
2214 if (!adapter->link_up)
2215 return;
2216
92915f71
GR
2217 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2218 adapter->stats.vfgprc);
2219 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2220 adapter->stats.vfgptc);
2221 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2222 adapter->stats.last_vfgorc,
2223 adapter->stats.vfgorc);
2224 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2225 adapter->stats.last_vfgotc,
2226 adapter->stats.vfgotc);
2227 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2228 adapter->stats.vfmprc);
55fb277c
GR
2229
2230 for (i = 0; i < adapter->num_rx_queues; i++) {
2231 adapter->hw_csum_rx_error +=
2232 adapter->rx_ring[i].hw_csum_rx_error;
2233 adapter->hw_csum_rx_good +=
2234 adapter->rx_ring[i].hw_csum_rx_good;
2235 adapter->rx_ring[i].hw_csum_rx_error = 0;
2236 adapter->rx_ring[i].hw_csum_rx_good = 0;
2237 }
92915f71
GR
2238}
2239
2240/**
2241 * ixgbevf_watchdog - Timer Call-back
2242 * @data: pointer to adapter cast into an unsigned long
2243 **/
2244static void ixgbevf_watchdog(unsigned long data)
2245{
2246 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2247 struct ixgbe_hw *hw = &adapter->hw;
5f3600eb 2248 u32 eics = 0;
92915f71
GR
2249 int i;
2250
2251 /*
2252 * Do the watchdog outside of interrupt context due to the lovely
2253 * delays that some of the newer hardware requires
2254 */
2255
2256 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2257 goto watchdog_short_circuit;
2258
2259 /* get one bit for every active tx/rx interrupt vector */
2260 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2261 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
6b43c446 2262 if (qv->rx.ring || qv->tx.ring)
5f3600eb 2263 eics |= 1 << i;
92915f71
GR
2264 }
2265
5f3600eb 2266 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
92915f71
GR
2267
2268watchdog_short_circuit:
2269 schedule_work(&adapter->watchdog_task);
2270}
2271
2272/**
2273 * ixgbevf_tx_timeout - Respond to a Tx Hang
2274 * @netdev: network interface device structure
2275 **/
2276static void ixgbevf_tx_timeout(struct net_device *netdev)
2277{
2278 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2279
2280 /* Do the reset outside of interrupt context */
2281 schedule_work(&adapter->reset_task);
2282}
2283
2284static void ixgbevf_reset_task(struct work_struct *work)
2285{
2286 struct ixgbevf_adapter *adapter;
2287 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2288
2289 /* If we're already down or resetting, just bail */
2290 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2291 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2292 return;
2293
2294 adapter->tx_timeout_count++;
2295
2296 ixgbevf_reinit_locked(adapter);
2297}
2298
2299/**
2300 * ixgbevf_watchdog_task - worker thread to bring link up
2301 * @work: pointer to work_struct containing our data
2302 **/
2303static void ixgbevf_watchdog_task(struct work_struct *work)
2304{
2305 struct ixgbevf_adapter *adapter = container_of(work,
2306 struct ixgbevf_adapter,
2307 watchdog_task);
2308 struct net_device *netdev = adapter->netdev;
2309 struct ixgbe_hw *hw = &adapter->hw;
2310 u32 link_speed = adapter->link_speed;
2311 bool link_up = adapter->link_up;
92fe0bf7 2312 s32 need_reset;
92915f71 2313
220fe050
DS
2314 ixgbevf_queue_reset_subtask(adapter);
2315
92915f71
GR
2316 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2317
2318 /*
2319 * Always check the link on the watchdog because we have
2320 * no LSC interrupt
2321 */
92fe0bf7 2322 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 2323
92fe0bf7 2324 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
1c55ed76 2325
92fe0bf7 2326 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 2327
92fe0bf7
GR
2328 if (need_reset) {
2329 adapter->link_up = link_up;
2330 adapter->link_speed = link_speed;
2331 netif_carrier_off(netdev);
2332 netif_tx_stop_all_queues(netdev);
2333 schedule_work(&adapter->reset_task);
2334 goto pf_has_reset;
92915f71
GR
2335 }
2336 adapter->link_up = link_up;
2337 adapter->link_speed = link_speed;
2338
2339 if (link_up) {
2340 if (!netif_carrier_ok(netdev)) {
b876a744
GR
2341 char *link_speed_string;
2342 switch (link_speed) {
2343 case IXGBE_LINK_SPEED_10GB_FULL:
2344 link_speed_string = "10 Gbps";
2345 break;
2346 case IXGBE_LINK_SPEED_1GB_FULL:
2347 link_speed_string = "1 Gbps";
2348 break;
2349 case IXGBE_LINK_SPEED_100_FULL:
2350 link_speed_string = "100 Mbps";
2351 break;
2352 default:
2353 link_speed_string = "unknown speed";
2354 break;
2355 }
6fe59675 2356 dev_info(&adapter->pdev->dev,
b876a744 2357 "NIC Link is Up, %s\n", link_speed_string);
92915f71
GR
2358 netif_carrier_on(netdev);
2359 netif_tx_wake_all_queues(netdev);
92915f71
GR
2360 }
2361 } else {
2362 adapter->link_up = false;
2363 adapter->link_speed = 0;
2364 if (netif_carrier_ok(netdev)) {
6fe59675 2365 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
92915f71
GR
2366 netif_carrier_off(netdev);
2367 netif_tx_stop_all_queues(netdev);
2368 }
2369 }
2370
92915f71
GR
2371 ixgbevf_update_stats(adapter);
2372
33bd9f60 2373pf_has_reset:
92915f71
GR
2374 /* Reset the timer */
2375 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2376 mod_timer(&adapter->watchdog_timer,
2377 round_jiffies(jiffies + (2 * HZ)));
2378
2379 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2380}
2381
2382/**
2383 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2384 * @adapter: board private structure
2385 * @tx_ring: Tx descriptor ring for a specific queue
2386 *
2387 * Free all transmit software resources
2388 **/
2389void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2390 struct ixgbevf_ring *tx_ring)
2391{
2392 struct pci_dev *pdev = adapter->pdev;
2393
92915f71
GR
2394 ixgbevf_clean_tx_ring(adapter, tx_ring);
2395
2396 vfree(tx_ring->tx_buffer_info);
2397 tx_ring->tx_buffer_info = NULL;
2398
2a1f8794
NN
2399 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2400 tx_ring->dma);
92915f71
GR
2401
2402 tx_ring->desc = NULL;
2403}
2404
2405/**
2406 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2407 * @adapter: board private structure
2408 *
2409 * Free all transmit software resources
2410 **/
2411static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2412{
2413 int i;
2414
2415 for (i = 0; i < adapter->num_tx_queues; i++)
2416 if (adapter->tx_ring[i].desc)
2417 ixgbevf_free_tx_resources(adapter,
2418 &adapter->tx_ring[i]);
2419
2420}
2421
2422/**
2423 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2424 * @adapter: board private structure
2425 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2426 *
2427 * Return 0 on success, negative on failure
2428 **/
2429int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2430 struct ixgbevf_ring *tx_ring)
2431{
2432 struct pci_dev *pdev = adapter->pdev;
2433 int size;
2434
2435 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
89bf67f1 2436 tx_ring->tx_buffer_info = vzalloc(size);
92915f71
GR
2437 if (!tx_ring->tx_buffer_info)
2438 goto err;
92915f71
GR
2439
2440 /* round up to nearest 4K */
2441 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2442 tx_ring->size = ALIGN(tx_ring->size, 4096);
2443
2a1f8794
NN
2444 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2445 &tx_ring->dma, GFP_KERNEL);
92915f71
GR
2446 if (!tx_ring->desc)
2447 goto err;
2448
92915f71
GR
2449 return 0;
2450
2451err:
2452 vfree(tx_ring->tx_buffer_info);
2453 tx_ring->tx_buffer_info = NULL;
2454 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2455 "descriptor ring\n");
2456 return -ENOMEM;
2457}
2458
2459/**
2460 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2461 * @adapter: board private structure
2462 *
2463 * If this function returns with an error, then it's possible one or
2464 * more of the rings is populated (while the rest are not). It is the
 2465 * caller's duty to clean those orphaned rings.
2466 *
2467 * Return 0 on success, negative on failure
2468 **/
2469static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2470{
2471 int i, err = 0;
2472
2473 for (i = 0; i < adapter->num_tx_queues; i++) {
2474 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2475 if (!err)
2476 continue;
2477 hw_dbg(&adapter->hw,
2478 "Allocation for Tx Queue %u failed\n", i);
2479 break;
2480 }
2481
2482 return err;
2483}
2484
2485/**
2486 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2487 * @adapter: board private structure
2488 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2489 *
2490 * Returns 0 on success, negative on failure
2491 **/
2492int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2493 struct ixgbevf_ring *rx_ring)
2494{
2495 struct pci_dev *pdev = adapter->pdev;
2496 int size;
2497
2498 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
89bf67f1 2499 rx_ring->rx_buffer_info = vzalloc(size);
e404decb 2500 if (!rx_ring->rx_buffer_info)
92915f71 2501 goto alloc_failed;
92915f71
GR
2502
2503 /* Round up to nearest 4K */
2504 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2505 rx_ring->size = ALIGN(rx_ring->size, 4096);
2506
2a1f8794
NN
2507 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2508 &rx_ring->dma, GFP_KERNEL);
92915f71
GR
2509
2510 if (!rx_ring->desc) {
92915f71
GR
2511 vfree(rx_ring->rx_buffer_info);
2512 rx_ring->rx_buffer_info = NULL;
2513 goto alloc_failed;
2514 }
2515
92915f71
GR
2516 return 0;
2517alloc_failed:
2518 return -ENOMEM;
2519}
2520
2521/**
2522 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2523 * @adapter: board private structure
2524 *
2525 * If this function returns with an error, then it's possible one or
2526 * more of the rings is populated (while the rest are not). It is the
 2527 * caller's duty to clean those orphaned rings.
2528 *
2529 * Return 0 on success, negative on failure
2530 **/
2531static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2532{
2533 int i, err = 0;
2534
2535 for (i = 0; i < adapter->num_rx_queues; i++) {
2536 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2537 if (!err)
2538 continue;
2539 hw_dbg(&adapter->hw,
2540 "Allocation for Rx Queue %u failed\n", i);
2541 break;
2542 }
2543 return err;
2544}
2545
2546/**
2547 * ixgbevf_free_rx_resources - Free Rx Resources
2548 * @adapter: board private structure
2549 * @rx_ring: ring to clean the resources from
2550 *
2551 * Free all receive software resources
2552 **/
2553void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2554 struct ixgbevf_ring *rx_ring)
2555{
2556 struct pci_dev *pdev = adapter->pdev;
2557
2558 ixgbevf_clean_rx_ring(adapter, rx_ring);
2559
2560 vfree(rx_ring->rx_buffer_info);
2561 rx_ring->rx_buffer_info = NULL;
2562
2a1f8794
NN
2563 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2564 rx_ring->dma);
92915f71
GR
2565
2566 rx_ring->desc = NULL;
2567}
2568
2569/**
2570 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2571 * @adapter: board private structure
2572 *
2573 * Free all receive software resources
2574 **/
2575static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2576{
2577 int i;
2578
2579 for (i = 0; i < adapter->num_rx_queues; i++)
2580 if (adapter->rx_ring[i].desc)
2581 ixgbevf_free_rx_resources(adapter,
2582 &adapter->rx_ring[i]);
2583}
2584
2585/**
2586 * ixgbevf_open - Called when a network interface is made active
2587 * @netdev: network interface device structure
2588 *
2589 * Returns 0 on success, negative value on failure
2590 *
2591 * The open entry point is called when a network interface is made
2592 * active by the system (IFF_UP). At this point all resources needed
2593 * for transmit and receive operations are allocated, the interrupt
2594 * handler is registered with the OS, the watchdog timer is started,
2595 * and the stack is notified that the interface is ready.
2596 **/
2597static int ixgbevf_open(struct net_device *netdev)
2598{
2599 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2600 struct ixgbe_hw *hw = &adapter->hw;
2601 int err;
2602
a1f6c6b1 2603 /* A previous failure to open the device because of a lack of
2604 * available MSIX vector resources may have reset the number
2605 * of msix vectors variable to zero. The only way to recover
2606 * is to unload/reload the driver and hope that the system has
2607 * been able to recover some MSIX vector resources.
2608 */
2609 if (!adapter->num_msix_vectors)
2610 return -ENOMEM;
2611
92915f71
GR
2612 /* disallow open during test */
2613 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2614 return -EBUSY;
2615
2616 if (hw->adapter_stopped) {
2617 ixgbevf_reset(adapter);
2618 /* if adapter is still stopped then PF isn't up and
2619 * the vf can't start. */
2620 if (hw->adapter_stopped) {
2621 err = IXGBE_ERR_MBX;
dbd9636e
JK
2622 pr_err("Unable to start - perhaps the PF Driver isn't "
2623 "up yet\n");
92915f71
GR
2624 goto err_setup_reset;
2625 }
2626 }
2627
2628 /* allocate transmit descriptors */
2629 err = ixgbevf_setup_all_tx_resources(adapter);
2630 if (err)
2631 goto err_setup_tx;
2632
2633 /* allocate receive descriptors */
2634 err = ixgbevf_setup_all_rx_resources(adapter);
2635 if (err)
2636 goto err_setup_rx;
2637
2638 ixgbevf_configure(adapter);
2639
2640 /*
2641 * Map the Tx/Rx rings to the vectors we were allotted.
2642 * if request_irq will be called in this function map_rings
2643 * must be called *before* up_complete
2644 */
2645 ixgbevf_map_rings_to_vectors(adapter);
2646
795180d8 2647 ixgbevf_up_complete(adapter);
92915f71
GR
2648
2649 /* clear any pending interrupts, may auto mask */
2650 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2651 err = ixgbevf_request_irq(adapter);
2652 if (err)
2653 goto err_req_irq;
2654
5f3600eb 2655 ixgbevf_irq_enable(adapter);
92915f71
GR
2656
2657 return 0;
2658
2659err_req_irq:
2660 ixgbevf_down(adapter);
92915f71
GR
2661err_setup_rx:
2662 ixgbevf_free_all_rx_resources(adapter);
2663err_setup_tx:
2664 ixgbevf_free_all_tx_resources(adapter);
2665 ixgbevf_reset(adapter);
2666
2667err_setup_reset:
2668
2669 return err;
2670}
2671
2672/**
2673 * ixgbevf_close - Disables a network interface
2674 * @netdev: network interface device structure
2675 *
2676 * Returns 0, this is not allowed to fail
2677 *
2678 * The close entry point is called when an interface is de-activated
2679 * by the OS. The hardware is still under the drivers control, but
2680 * needs to be disabled. A global MAC reset is issued to stop the
2681 * hardware, and all transmit and receive resources are freed.
2682 **/
2683static int ixgbevf_close(struct net_device *netdev)
2684{
2685 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2686
2687 ixgbevf_down(adapter);
2688 ixgbevf_free_irq(adapter);
2689
2690 ixgbevf_free_all_tx_resources(adapter);
2691 ixgbevf_free_all_rx_resources(adapter);
2692
2693 return 0;
2694}
2695
220fe050
DS
2696static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
2697{
2698 struct net_device *dev = adapter->netdev;
2699
2700 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
2701 return;
2702
2703 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
2704
2705 /* if interface is down do nothing */
2706 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2707 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2708 return;
2709
2710 /* Hardware has to reinitialize queues and interrupts to
2711 * match packet buffer alignment. Unfortunately, the
2712 * hardware is not flexible enough to do this dynamically.
2713 */
2714 if (netif_running(dev))
2715 ixgbevf_close(dev);
2716
2717 ixgbevf_clear_interrupt_scheme(adapter);
2718 ixgbevf_init_interrupt_scheme(adapter);
2719
2720 if (netif_running(dev))
2721 ixgbevf_open(dev);
2722}
2723
70a10e25
AD
2724static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2725 u32 vlan_macip_lens, u32 type_tucmd,
2726 u32 mss_l4len_idx)
92915f71
GR
2727{
2728 struct ixgbe_adv_tx_context_desc *context_desc;
70a10e25 2729 u16 i = tx_ring->next_to_use;
92915f71 2730
70a10e25 2731 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
92915f71 2732
70a10e25
AD
2733 i++;
2734 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
92915f71 2735
70a10e25
AD
2736 /* set bits to identify this as an advanced context descriptor */
2737 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
92915f71 2738
70a10e25
AD
2739 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2740 context_desc->seqnum_seed = 0;
2741 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2742 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2743}
2744
2745static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2746 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2747{
2748 u32 vlan_macip_lens, type_tucmd;
2749 u32 mss_l4len_idx, l4len;
2750
2751 if (!skb_is_gso(skb))
2752 return 0;
92915f71 2753
70a10e25
AD
2754 if (skb_header_cloned(skb)) {
2755 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2756 if (err)
2757 return err;
92915f71
GR
2758 }
2759
70a10e25
AD
2760 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2761 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2762
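	/* For TSO the hardware recomputes the IP length/checksum and the
	 * TCP checksum for each segment, so zero them here and seed the
	 * TCP checksum with only the pseudo-header. */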
2763 if (skb->protocol == htons(ETH_P_IP)) {
2764 struct iphdr *iph = ip_hdr(skb);
2765 iph->tot_len = 0;
2766 iph->check = 0;
2767 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2768 iph->daddr, 0,
2769 IPPROTO_TCP,
2770 0);
2771 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2772 } else if (skb_is_gso_v6(skb)) {
2773 ipv6_hdr(skb)->payload_len = 0;
2774 tcp_hdr(skb)->check =
2775 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2776 &ipv6_hdr(skb)->daddr,
2777 0, IPPROTO_TCP, 0);
2778 }
2779
2780 /* compute header lengths */
2781 l4len = tcp_hdrlen(skb);
 2782	/* total header length = network headers plus the TCP header */
 2783	*hdr_len = skb_transport_offset(skb) + l4len;
2784
2785 /* mss_l4len_id: use 1 as index for TSO */
2786 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2787 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2788 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2789
2790 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2791 vlan_macip_lens = skb_network_header_len(skb);
2792 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2793 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2794
2795 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2796 type_tucmd, mss_l4len_idx);
2797
2798 return 1;
92915f71
GR
2799}
2800
70a10e25 2801static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
92915f71
GR
2802 struct sk_buff *skb, u32 tx_flags)
2803{
70a10e25
AD
2804 u32 vlan_macip_lens = 0;
2805 u32 mss_l4len_idx = 0;
2806 u32 type_tucmd = 0;
92915f71 2807
70a10e25
AD
2808 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2809 u8 l4_hdr = 0;
2810 switch (skb->protocol) {
2811 case __constant_htons(ETH_P_IP):
2812 vlan_macip_lens |= skb_network_header_len(skb);
2813 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2814 l4_hdr = ip_hdr(skb)->protocol;
2815 break;
2816 case __constant_htons(ETH_P_IPV6):
2817 vlan_macip_lens |= skb_network_header_len(skb);
2818 l4_hdr = ipv6_hdr(skb)->nexthdr;
2819 break;
2820 default:
2821 if (unlikely(net_ratelimit())) {
2822 dev_warn(tx_ring->dev,
2823 "partial checksum but proto=%x!\n",
2824 skb->protocol);
2825 }
2826 break;
2827 }
92915f71 2828
70a10e25
AD
2829 switch (l4_hdr) {
2830 case IPPROTO_TCP:
2831 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2832 mss_l4len_idx = tcp_hdrlen(skb) <<
2833 IXGBE_ADVTXD_L4LEN_SHIFT;
2834 break;
2835 case IPPROTO_SCTP:
2836 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2837 mss_l4len_idx = sizeof(struct sctphdr) <<
2838 IXGBE_ADVTXD_L4LEN_SHIFT;
2839 break;
2840 case IPPROTO_UDP:
2841 mss_l4len_idx = sizeof(struct udphdr) <<
2842 IXGBE_ADVTXD_L4LEN_SHIFT;
2843 break;
2844 default:
2845 if (unlikely(net_ratelimit())) {
2846 dev_warn(tx_ring->dev,
2847 "partial checksum but l4 proto=%x!\n",
2848 l4_hdr);
2849 }
2850 break;
2851 }
92915f71
GR
2852 }
2853
70a10e25
AD
2854 /* vlan_macip_lens: MACLEN, VLAN tag */
2855 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2856 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2857
2858 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2859 type_tucmd, mss_l4len_idx);
2860
2861 return (skb->ip_summed == CHECKSUM_PARTIAL);
92915f71
GR
2862}
2863
70a10e25 2864static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
e757e3e1 2865 struct sk_buff *skb, u32 tx_flags)
92915f71 2866{
92915f71
GR
2867 struct ixgbevf_tx_buffer *tx_buffer_info;
2868 unsigned int len;
2869 unsigned int total = skb->len;
2540ddb5
KV
2870 unsigned int offset = 0, size;
2871 int count = 0;
92915f71
GR
2872 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2873 unsigned int f;
65deeed7 2874 int i;
92915f71
GR
2875
2876 i = tx_ring->next_to_use;
2877
2878 len = min(skb_headlen(skb), total);
2879 while (len) {
2880 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2881 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2882
2883 tx_buffer_info->length = size;
2884 tx_buffer_info->mapped_as_page = false;
70a10e25 2885 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
92915f71 2886 skb->data + offset,
2a1f8794 2887 size, DMA_TO_DEVICE);
70a10e25 2888 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
92915f71 2889 goto dma_error;
92915f71
GR
2890
2891 len -= size;
2892 total -= size;
2893 offset += size;
2894 count++;
2895 i++;
2896 if (i == tx_ring->count)
2897 i = 0;
2898 }
2899
2900 for (f = 0; f < nr_frags; f++) {
9e903e08 2901 const struct skb_frag_struct *frag;
92915f71
GR
2902
2903 frag = &skb_shinfo(skb)->frags[f];
9e903e08 2904 len = min((unsigned int)skb_frag_size(frag), total);
877749bf 2905 offset = 0;
92915f71
GR
2906
2907 while (len) {
2908 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2909 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2910
2911 tx_buffer_info->length = size;
877749bf 2912 tx_buffer_info->dma =
70a10e25 2913 skb_frag_dma_map(tx_ring->dev, frag,
877749bf 2914 offset, size, DMA_TO_DEVICE);
70a10e25
AD
2915 if (dma_mapping_error(tx_ring->dev,
2916 tx_buffer_info->dma))
92915f71 2917 goto dma_error;
6132ee8a 2918 tx_buffer_info->mapped_as_page = true;
92915f71
GR
2919
2920 len -= size;
2921 total -= size;
2922 offset += size;
2923 count++;
2924 i++;
2925 if (i == tx_ring->count)
2926 i = 0;
2927 }
2928 if (total == 0)
2929 break;
2930 }
2931
2932 if (i == 0)
2933 i = tx_ring->count - 1;
2934 else
2935 i = i - 1;
2936 tx_ring->tx_buffer_info[i].skb = skb;
92915f71
GR
2937
2938 return count;
2939
2940dma_error:
70a10e25 2941 dev_err(tx_ring->dev, "TX DMA map failed\n");
92915f71
GR
2942
2943 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2944 tx_buffer_info->dma = 0;
92915f71
GR
2945 count--;
2946
2947 /* clear timestamp and dma mappings for remaining portion of packet */
2948 while (count >= 0) {
2949 count--;
2950 i--;
2951 if (i < 0)
2952 i += tx_ring->count;
2953 tx_buffer_info = &tx_ring->tx_buffer_info[i];
70a10e25 2954 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
92915f71
GR
2955 }
2956
2957 return count;
2958}
2959
70a10e25 2960static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
e757e3e1
AD
2961 int count, unsigned int first, u32 paylen,
2962 u8 hdr_len)
92915f71
GR
2963{
2964 union ixgbe_adv_tx_desc *tx_desc = NULL;
2965 struct ixgbevf_tx_buffer *tx_buffer_info;
2966 u32 olinfo_status = 0, cmd_type_len = 0;
2967 unsigned int i;
2968
2969 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2970
2971 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2972
2973 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2974
2975 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2976 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2977
70a10e25
AD
2978 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2979 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2980
92915f71
GR
2981 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2982 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2983
92915f71
GR
2984 /* use index 1 context for tso */
2985 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2986 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
70a10e25 2987 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
70a10e25 2988 }
92915f71 2989
70a10e25
AD
2990 /*
2991 * Check Context must be set if Tx switch is enabled, which it
 2992	 * always is in the case where virtual functions are running
2993 */
2994 olinfo_status |= IXGBE_ADVTXD_CC;
92915f71
GR
2995
2996 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2997
2998 i = tx_ring->next_to_use;
2999 while (count--) {
3000 tx_buffer_info = &tx_ring->tx_buffer_info[i];
908421f6 3001 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
92915f71
GR
3002 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3003 tx_desc->read.cmd_type_len =
3004 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3005 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3006 i++;
3007 if (i == tx_ring->count)
3008 i = 0;
3009 }
3010
3011 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3012
e757e3e1
AD
3013 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
3014
3015 /* Force memory writes to complete before letting h/w
3016 * know there are new descriptors to fetch. (Only
3017 * applicable for weak-ordered memory model archs,
3018 * such as IA-64).
3019 */
3020 wmb();
3021
3022 tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
92915f71 3023 tx_ring->next_to_use = i;
92915f71
GR
3024}
3025
fb40195c 3026static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
92915f71 3027{
fb40195c 3028 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
92915f71 3029
fb40195c 3030 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
92915f71
GR
3031 /* Herbert's original patch had:
3032 * smp_mb__after_netif_stop_queue();
3033 * but since that doesn't exist yet, just open code it. */
3034 smp_mb();
3035
3036 /* We need to check again in a case another CPU has just
3037 * made room available. */
f880d07b 3038 if (likely(ixgbevf_desc_unused(tx_ring) < size))
92915f71
GR
3039 return -EBUSY;
3040
3041 /* A reprieve! - use start_queue because it doesn't call schedule */
fb40195c 3042 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
92915f71
GR
3043 ++adapter->restart_queue;
3044 return 0;
3045}
3046
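/* Fast path: if enough descriptors are free, do nothing; otherwise fall
 * back to the slow path above, which stops the queue, re-checks under a
 * memory barrier, and re-wakes the queue if the cleanup path freed space
 * in the meantime. */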
fb40195c 3047static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
92915f71 3048{
f880d07b 3049 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
92915f71 3050 return 0;
fb40195c 3051 return __ixgbevf_maybe_stop_tx(tx_ring, size);
92915f71
GR
3052}
3053
3054static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3055{
3056 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3057 struct ixgbevf_ring *tx_ring;
3058 unsigned int first;
3059 unsigned int tx_flags = 0;
3060 u8 hdr_len = 0;
3061 int r_idx = 0, tso;
3595990a
AD
3062 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3063#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3064 unsigned short f;
3065#endif
f9d08f16 3066 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
46acc460 3067 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
f9d08f16
GR
3068 dev_kfree_skb(skb);
3069 return NETDEV_TX_OK;
3070 }
92915f71
GR
3071
3072 tx_ring = &adapter->tx_ring[r_idx];
3073
3595990a
AD
3074 /*
3075 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3076 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3077 * + 2 desc gap to keep tail from touching head,
3078 * + 1 desc for context descriptor,
3079 * otherwise try next time
3080 */
3081#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3082 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3083 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3084#else
3085 count += skb_shinfo(skb)->nr_frags;
3086#endif
fb40195c 3087 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3595990a
AD
3088 adapter->tx_busy++;
3089 return NETDEV_TX_BUSY;
3090 }
3091
eab6d18d 3092 if (vlan_tx_tag_present(skb)) {
92915f71
GR
3093 tx_flags |= vlan_tx_tag_get(skb);
3094 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3095 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3096 }
3097
92915f71
GR
3098 first = tx_ring->next_to_use;
3099
3100 if (skb->protocol == htons(ETH_P_IP))
3101 tx_flags |= IXGBE_TX_FLAGS_IPV4;
70a10e25 3102 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
92915f71
GR
3103 if (tso < 0) {
3104 dev_kfree_skb_any(skb);
3105 return NETDEV_TX_OK;
3106 }
3107
3108 if (tso)
70a10e25
AD
3109 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
3110 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
92915f71
GR
3111 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3112
70a10e25 3113 ixgbevf_tx_queue(tx_ring, tx_flags,
e757e3e1
AD
3114 ixgbevf_tx_map(tx_ring, skb, tx_flags),
3115 first, skb->len, hdr_len);
70a10e25 3116
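	/* writing next_to_use to the ring's tail register hands the newly
	 * queued descriptors over to the hardware for transmission */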
5cdab2f6 3117 writel(tx_ring->next_to_use, tx_ring->tail);
92915f71 3118
fb40195c 3119 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
92915f71
GR
3120
3121 return NETDEV_TX_OK;
3122}
3123
92915f71
GR
3124/**
3125 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3126 * @netdev: network interface device structure
3127 * @p: pointer to an address structure
3128 *
3129 * Returns 0 on success, negative on failure
3130 **/
3131static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3132{
3133 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3134 struct ixgbe_hw *hw = &adapter->hw;
3135 struct sockaddr *addr = p;
3136
3137 if (!is_valid_ether_addr(addr->sa_data))
3138 return -EADDRNOTAVAIL;
3139
3140 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3141 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3142
55fdd45b 3143 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 3144
92fe0bf7 3145 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
92915f71 3146
55fdd45b 3147 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 3148
92915f71
GR
3149 return 0;
3150}
3151
3152/**
3153 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3154 * @netdev: network interface device structure
3155 * @new_mtu: new value for maximum frame size
3156 *
3157 * Returns 0 on success, negative on failure
3158 **/
3159static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3160{
3161 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3162 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
69bfbec4 3163 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
69bfbec4 3164
56e94095
AD
3165 switch (adapter->hw.api_version) {
3166 case ixgbe_mbox_api_11:
69bfbec4 3167 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
56e94095
AD
3168 break;
3169 default:
3170 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3171 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3172 break;
3173 }
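	/* max_frame already includes the Ethernet header and FCS, so the
	 * standard 1500-byte MTU corresponds to a 1518-byte frame here. */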
92915f71
GR
3174
3175 /* MTU < 68 is an error and causes problems on some kernels */
69bfbec4 3176 if ((new_mtu < 68) || (max_frame > max_possible_frame))
92915f71
GR
3177 return -EINVAL;
3178
3179 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3180 netdev->mtu, new_mtu);
3181 /* must set new MTU before calling down or up */
3182 netdev->mtu = new_mtu;
3183
3184 if (netif_running(netdev))
3185 ixgbevf_reinit_locked(adapter);
3186
3187 return 0;
3188}
3189
0ac1e8ce 3190static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
92915f71
GR
3191{
3192 struct net_device *netdev = pci_get_drvdata(pdev);
3193 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
0ac1e8ce
AD
3194#ifdef CONFIG_PM
3195 int retval = 0;
3196#endif
92915f71
GR
3197
3198 netif_device_detach(netdev);
3199
3200 if (netif_running(netdev)) {
0ac1e8ce 3201 rtnl_lock();
92915f71
GR
3202 ixgbevf_down(adapter);
3203 ixgbevf_free_irq(adapter);
3204 ixgbevf_free_all_tx_resources(adapter);
3205 ixgbevf_free_all_rx_resources(adapter);
0ac1e8ce 3206 rtnl_unlock();
92915f71
GR
3207 }
3208
0ac1e8ce 3209 ixgbevf_clear_interrupt_scheme(adapter);
92915f71 3210
0ac1e8ce
AD
3211#ifdef CONFIG_PM
3212 retval = pci_save_state(pdev);
3213 if (retval)
3214 return retval;
92915f71 3215
0ac1e8ce 3216#endif
92915f71 3217 pci_disable_device(pdev);
0ac1e8ce
AD
3218
3219 return 0;
3220}
3221
3222#ifdef CONFIG_PM
3223static int ixgbevf_resume(struct pci_dev *pdev)
3224{
3225 struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
3226 struct net_device *netdev = adapter->netdev;
3227 u32 err;
3228
3229 pci_set_power_state(pdev, PCI_D0);
3230 pci_restore_state(pdev);
3231 /*
3232 * pci_restore_state clears dev->state_saved so call
3233 * pci_save_state to restore it.
3234 */
3235 pci_save_state(pdev);
3236
3237 err = pci_enable_device_mem(pdev);
3238 if (err) {
3239 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3240 return err;
3241 }
3242 pci_set_master(pdev);
3243
798e381a
DS
3244 ixgbevf_reset(adapter);
3245
0ac1e8ce
AD
3246 rtnl_lock();
3247 err = ixgbevf_init_interrupt_scheme(adapter);
3248 rtnl_unlock();
3249 if (err) {
3250 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3251 return err;
3252 }
3253
0ac1e8ce
AD
3254 if (netif_running(netdev)) {
3255 err = ixgbevf_open(netdev);
3256 if (err)
3257 return err;
3258 }
3259
3260 netif_device_attach(netdev);
3261
3262 return err;
3263}
3264
3265#endif /* CONFIG_PM */
3266static void ixgbevf_shutdown(struct pci_dev *pdev)
3267{
3268 ixgbevf_suspend(pdev, PMSG_SUSPEND);
92915f71
GR
3269}
3270
4197aa7b
ED
3271static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3272 struct rtnl_link_stats64 *stats)
3273{
3274 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3275 unsigned int start;
3276 u64 bytes, packets;
3277 const struct ixgbevf_ring *ring;
3278 int i;
3279
3280 ixgbevf_update_stats(adapter);
3281
3282 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3283
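	/* the begin/retry pair re-reads a ring's counters if its seqcount
	 * changed mid-read, keeping the 64-bit byte/packet totals
	 * consistent even on 32-bit hosts */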
3284 for (i = 0; i < adapter->num_rx_queues; i++) {
3285 ring = &adapter->rx_ring[i];
3286 do {
3287 start = u64_stats_fetch_begin_bh(&ring->syncp);
3288 bytes = ring->total_bytes;
3289 packets = ring->total_packets;
3290 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3291 stats->rx_bytes += bytes;
3292 stats->rx_packets += packets;
3293 }
3294
3295 for (i = 0; i < adapter->num_tx_queues; i++) {
3296 ring = &adapter->tx_ring[i];
3297 do {
3298 start = u64_stats_fetch_begin_bh(&ring->syncp);
3299 bytes = ring->total_bytes;
3300 packets = ring->total_packets;
3301 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3302 stats->tx_bytes += bytes;
3303 stats->tx_packets += packets;
3304 }
3305
3306 return stats;
3307}
3308
0ac1e8ce 3309static const struct net_device_ops ixgbevf_netdev_ops = {
c12db769
SH
3310 .ndo_open = ixgbevf_open,
3311 .ndo_stop = ixgbevf_close,
3312 .ndo_start_xmit = ixgbevf_xmit_frame,
3313 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4197aa7b 3314 .ndo_get_stats64 = ixgbevf_get_stats,
92915f71 3315 .ndo_validate_addr = eth_validate_addr,
c12db769
SH
3316 .ndo_set_mac_address = ixgbevf_set_mac,
3317 .ndo_change_mtu = ixgbevf_change_mtu,
3318 .ndo_tx_timeout = ixgbevf_tx_timeout,
c12db769
SH
3319 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3320 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
c777cdfa
JK
3321#ifdef CONFIG_NET_RX_BUSY_POLL
3322 .ndo_busy_poll = ixgbevf_busy_poll_recv,
3323#endif
92915f71 3324};
92915f71
GR
3325
3326static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3327{
0ac1e8ce 3328 dev->netdev_ops = &ixgbevf_netdev_ops;
92915f71
GR
3329 ixgbevf_set_ethtool_ops(dev);
3330 dev->watchdog_timeo = 5 * HZ;
3331}
3332
3333/**
3334 * ixgbevf_probe - Device Initialization Routine
3335 * @pdev: PCI device information struct
3336 * @ent: entry in ixgbevf_pci_tbl
3337 *
3338 * Returns 0 on success, negative on failure
3339 *
3340 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3341 * The OS initialization, configuring of the adapter private structure,
3342 * and a hardware reset occur.
3343 **/
1dd06ae8 3344static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
92915f71
GR
3345{
3346 struct net_device *netdev;
3347 struct ixgbevf_adapter *adapter = NULL;
3348 struct ixgbe_hw *hw = NULL;
3349 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3350 static int cards_found;
3351 int err, pci_using_dac;
3352
3353 err = pci_enable_device(pdev);
3354 if (err)
3355 return err;
3356
53567aa4 3357 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
92915f71
GR
3358 pci_using_dac = 1;
3359 } else {
53567aa4 3360 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
92915f71 3361 if (err) {
53567aa4
RK
3362 dev_err(&pdev->dev, "No usable DMA "
3363 "configuration, aborting\n");
3364 goto err_dma;
92915f71
GR
3365 }
3366 pci_using_dac = 0;
3367 }
3368
3369 err = pci_request_regions(pdev, ixgbevf_driver_name);
3370 if (err) {
3371 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3372 goto err_pci_reg;
3373 }
3374
3375 pci_set_master(pdev);
3376
92915f71
GR
3377 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3378 MAX_TX_QUEUES);
92915f71
GR
3379 if (!netdev) {
3380 err = -ENOMEM;
3381 goto err_alloc_etherdev;
3382 }
3383
3384 SET_NETDEV_DEV(netdev, &pdev->dev);
3385
3386 pci_set_drvdata(pdev, netdev);
3387 adapter = netdev_priv(netdev);
3388
3389 adapter->netdev = netdev;
3390 adapter->pdev = pdev;
3391 hw = &adapter->hw;
3392 hw->back = adapter;
b3f4d599 3393 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
92915f71
GR
3394
3395 /*
3396 * call save state here in standalone driver because it relies on
3397 * adapter struct to exist, and needs to call netdev_priv
3398 */
3399 pci_save_state(pdev);
3400
3401 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3402 pci_resource_len(pdev, 0));
3403 if (!hw->hw_addr) {
3404 err = -EIO;
3405 goto err_ioremap;
3406 }
3407
3408 ixgbevf_assign_netdev_ops(netdev);
3409
3410 adapter->bd_number = cards_found;
3411
3412 /* Setup hw api */
3413 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3414 hw->mac.type = ii->mac;
3415
3416 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
f416dfc0 3417 sizeof(struct ixgbe_mbx_operations));
92915f71 3418
92915f71
GR
3419 /* setup the private structure */
3420 err = ixgbevf_sw_init(adapter);
1a0d6ae5
DK
3421 if (err)
3422 goto err_sw_init;
3423
3424 /* The HW MAC address was set and/or determined in sw_init */
1a0d6ae5
DK
3425 if (!is_valid_ether_addr(netdev->dev_addr)) {
3426 pr_err("invalid MAC address\n");
3427 err = -EIO;
3428 goto err_sw_init;
3429 }
92915f71 3430
471a76de 3431 netdev->hw_features = NETIF_F_SG |
92915f71 3432 NETIF_F_IP_CSUM |
471a76de
MM
3433 NETIF_F_IPV6_CSUM |
3434 NETIF_F_TSO |
3435 NETIF_F_TSO6 |
3436 NETIF_F_RXCSUM;
3437
3438 netdev->features = netdev->hw_features |
f646968f
PM
3439 NETIF_F_HW_VLAN_CTAG_TX |
3440 NETIF_F_HW_VLAN_CTAG_RX |
3441 NETIF_F_HW_VLAN_CTAG_FILTER;
92915f71 3442
92915f71
GR
3443 netdev->vlan_features |= NETIF_F_TSO;
3444 netdev->vlan_features |= NETIF_F_TSO6;
3445 netdev->vlan_features |= NETIF_F_IP_CSUM;
3bfacf96 3446 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
92915f71
GR
3447 netdev->vlan_features |= NETIF_F_SG;
3448
3449 if (pci_using_dac)
3450 netdev->features |= NETIF_F_HIGHDMA;
3451
01789349
JP
3452 netdev->priv_flags |= IFF_UNICAST_FLT;
3453
92915f71 3454 init_timer(&adapter->watchdog_timer);
c061b18d 3455 adapter->watchdog_timer.function = ixgbevf_watchdog;
92915f71
GR
3456 adapter->watchdog_timer.data = (unsigned long)adapter;
3457
3458 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3459 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3460
3461 err = ixgbevf_init_interrupt_scheme(adapter);
3462 if (err)
3463 goto err_sw_init;
3464
92915f71
GR
	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

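	/*
	 * Error unwind: each label below releases only what was acquired
	 * before the corresponding failure point, in reverse order.
	 */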
err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

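	/*
	 * Mark the adapter as going down, then quiesce the watchdog timer
	 * and deferred work before unregistering the netdev and releasing
	 * hardware resources.
	 */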
	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the ixgbevf_resume routine.
 */
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the ixgbevf_resume routine.
 */
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

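/*
 * The PCI error-recovery core calls these handlers in order:
 * error_detected first, slot_reset after the bus has been reset, and
 * resume once recovery has succeeded.
 */
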
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe = ixgbevf_probe,
	.remove = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = ixgbevf_suspend,
	.resume = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};

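/*
 * Binding is table driven: once pci_register_driver() runs below, the
 * PCI core calls ixgbevf_probe() for every device matching
 * ixgbevf_pci_tbl and ixgbevf_remove() when such a device goes away.
 */
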
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

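/*
 * Illustrative only: on a guest that has been assigned a VF, loading
 * the module (e.g. "modprobe ixgbevf") walks the probe path above for
 * each matching virtual function, and "rmmod ixgbevf" runs the exit
 * path below.
 */
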
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by the hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */