92915f71
GR
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
2e7cfbdd 4 Copyright(c) 1999 - 2014 Intel Corporation.
92915f71
GR
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31******************************************************************************/
dbd9636e
JK
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
92915f71 35#include <linux/types.h>
dadcd65f 36#include <linux/bitops.h>
92915f71
GR
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/netdevice.h>
40#include <linux/vmalloc.h>
41#include <linux/string.h>
42#include <linux/in.h>
43#include <linux/ip.h>
44#include <linux/tcp.h>
70a10e25 45#include <linux/sctp.h>
92915f71 46#include <linux/ipv6.h>
5a0e3ad6 47#include <linux/slab.h>
92915f71
GR
48#include <net/checksum.h>
49#include <net/ip6_checksum.h>
50#include <linux/ethtool.h>
01789349 51#include <linux/if.h>
92915f71 52#include <linux/if_vlan.h>
70c71606 53#include <linux/prefetch.h>
92915f71
GR
54
55#include "ixgbevf.h"
56
3d8fe98f 57const char ixgbevf_driver_name[] = "ixgbevf";
92915f71 58static const char ixgbevf_driver_string[] =
422e05d1 59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
92915f71 60
86f359f6 61#define DRV_VERSION "2.12.1-k"
92915f71 62const char ixgbevf_driver_version[] = DRV_VERSION;
66c87bd5 63static char ixgbevf_copyright[] =
5c47a2b6 64 "Copyright (c) 2009 - 2012 Intel Corporation.";
92915f71
GR
65
66static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
2316aa2a
GR
67 [board_82599_vf] = &ixgbevf_82599_vf_info,
68 [board_X540_vf] = &ixgbevf_X540_vf_info,
92915f71
GR
69};
70
71/* ixgbevf_pci_tbl - PCI Device ID Table
72 *
73 * Wildcard entries (PCI_ANY_ID) should come last
74 * Last entry must be all 0s
75 *
76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77 * Class, Class Mask, private data (not used) }
78 */
9baa3c34 79static const struct pci_device_id ixgbevf_pci_tbl[] = {
39ba22b4
SH
80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
92915f71
GR
82 /* required last entry */
83 {0, }
84};
85MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
86
87MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
b8ce18cd 88MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
92915f71
GR
89MODULE_LICENSE("GPL");
90MODULE_VERSION(DRV_VERSION);
91
b3f4d599 92#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
93static int debug = -1;
94module_param(debug, int, 0);
95MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
92915f71
GR
96
97/* forward decls */
220fe050 98static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
fa71ae27 99static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
56e94095 100static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
92915f71 101
dbf8b0d8
MR
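/* ixgbevf_remove_adapter - handle surprise removal: clear the register base so
 * subsequent accesses fail fast and kick the watchdog task to begin teardown.
 */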
102static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
103{
104 struct ixgbevf_adapter *adapter = hw->back;
105
106 if (!hw->hw_addr)
107 return;
108 hw->hw_addr = NULL;
109 dev_err(&adapter->pdev->dev, "Adapter removed\n");
ea699569
MR
110 if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
111 schedule_work(&adapter->watchdog_task);
dbf8b0d8
MR
112}
113
114static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
115{
116 u32 value;
117
118 /* The following check not only optimizes a bit by not
119 * performing a read on the status register when the
120 * register just read was a status register read that
121 * returned IXGBE_FAILED_READ_REG. It also blocks any
122 * potential recursion.
123 */
124 if (reg == IXGBE_VFSTATUS) {
125 ixgbevf_remove_adapter(hw);
126 return;
127 }
32c74949 128 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
dbf8b0d8
MR
129 if (value == IXGBE_FAILED_READ_REG)
130 ixgbevf_remove_adapter(hw);
131}
132
32c74949 133u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
dbf8b0d8
MR
134{
135 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
136 u32 value;
137
138 if (IXGBE_REMOVED(reg_addr))
139 return IXGBE_FAILED_READ_REG;
140 value = readl(reg_addr + reg);
141 if (unlikely(value == IXGBE_FAILED_READ_REG))
142 ixgbevf_check_remove(hw, reg);
143 return value;
144}
145
49ce9c2c 146/**
65d676c8 147 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
92915f71
GR
148 * @adapter: pointer to adapter struct
149 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
150 * @queue: queue to map the corresponding interrupt to
151 * @msix_vector: the vector to map to the corresponding queue
92915f71
GR
152 */
153static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
154 u8 queue, u8 msix_vector)
155{
156 u32 ivar, index;
157 struct ixgbe_hw *hw = &adapter->hw;
158 if (direction == -1) {
159 /* other causes */
160 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
161 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
162 ivar &= ~0xFF;
163 ivar |= msix_vector;
164 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
165 } else {
166 /* tx or rx causes */
167 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
168 index = ((16 * (queue & 1)) + (8 * direction));
169 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
170 ivar &= ~(0xFF << index);
171 ivar |= (msix_vector << index);
172 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
173 }
174}
175
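/* ixgbevf_unmap_and_free_tx_resource - release a Tx buffer: free its skb (if
 * any), undo the DMA mapping still held for it, and reset its bookkeeping.
 */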
70a10e25 176static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
9bdfefd2
ET
177 struct ixgbevf_tx_buffer *tx_buffer)
178{
179 if (tx_buffer->skb) {
180 dev_kfree_skb_any(tx_buffer->skb);
181 if (dma_unmap_len(tx_buffer, len))
70a10e25 182 dma_unmap_single(tx_ring->dev,
9bdfefd2
ET
183 dma_unmap_addr(tx_buffer, dma),
184 dma_unmap_len(tx_buffer, len),
2a1f8794 185 DMA_TO_DEVICE);
9bdfefd2
ET
186 } else if (dma_unmap_len(tx_buffer, len)) {
187 dma_unmap_page(tx_ring->dev,
188 dma_unmap_addr(tx_buffer, dma),
189 dma_unmap_len(tx_buffer, len),
190 DMA_TO_DEVICE);
92915f71 191 }
9bdfefd2
ET
192 tx_buffer->next_to_watch = NULL;
193 tx_buffer->skb = NULL;
194 dma_unmap_len_set(tx_buffer, len, 0);
195 /* tx_buffer must be completely set up in the transmit path */
92915f71
GR
196}
197
92915f71
GR
198#define IXGBE_MAX_TXD_PWR 14
199#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
200
201/* Tx Descriptors needed, worst case */
3595990a
AD
202#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
203#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
92915f71
GR
204
205static void ixgbevf_tx_timeout(struct net_device *netdev);
206
207/**
208 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
fa71ae27 209 * @q_vector: structure containing interrupt and ring information
92915f71
GR
210 * @tx_ring: tx ring to clean
211 **/
fa71ae27 212static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
92915f71
GR
213 struct ixgbevf_ring *tx_ring)
214{
fa71ae27 215 struct ixgbevf_adapter *adapter = q_vector->adapter;
7ad1a093
ET
216 struct ixgbevf_tx_buffer *tx_buffer;
217 union ixgbe_adv_tx_desc *tx_desc;
92915f71 218 unsigned int total_bytes = 0, total_packets = 0;
7ad1a093
ET
219 unsigned int budget = tx_ring->count / 2;
220 unsigned int i = tx_ring->next_to_clean;
92915f71 221
10cc1bdd
AD
222 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
223 return true;
224
7ad1a093
ET
225 tx_buffer = &tx_ring->tx_buffer_info[i];
226 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
227 i -= tx_ring->count;
92915f71 228
e757e3e1 229 do {
7ad1a093 230 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
e757e3e1
AD
231
232 /* if next_to_watch is not set then there is no work pending */
233 if (!eop_desc)
234 break;
235
236 /* prevent any other reads prior to eop_desc */
237 read_barrier_depends();
238
239 /* if DD is not set pending work has not been completed */
240 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
241 break;
242
243 /* clear next_to_watch to prevent false hangs */
7ad1a093 244 tx_buffer->next_to_watch = NULL;
e757e3e1 245
7ad1a093
ET
246 /* update the statistics for this packet */
247 total_bytes += tx_buffer->bytecount;
248 total_packets += tx_buffer->gso_segs;
92915f71 249
9bdfefd2
ET
250 /* free the skb */
251 dev_kfree_skb_any(tx_buffer->skb);
252
253 /* unmap skb header data */
254 dma_unmap_single(tx_ring->dev,
255 dma_unmap_addr(tx_buffer, dma),
256 dma_unmap_len(tx_buffer, len),
257 DMA_TO_DEVICE);
258
7ad1a093 259 /* clear tx_buffer data */
9bdfefd2
ET
260 tx_buffer->skb = NULL;
261 dma_unmap_len_set(tx_buffer, len, 0);
92915f71 262
7ad1a093
ET
263 /* unmap remaining buffers */
264 while (tx_desc != eop_desc) {
7ad1a093
ET
265 tx_buffer++;
266 tx_desc++;
92915f71 267 i++;
7ad1a093
ET
268 if (unlikely(!i)) {
269 i -= tx_ring->count;
270 tx_buffer = tx_ring->tx_buffer_info;
271 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
272 }
e757e3e1 273
9bdfefd2
ET
274 /* unmap any remaining paged data */
275 if (dma_unmap_len(tx_buffer, len)) {
276 dma_unmap_page(tx_ring->dev,
277 dma_unmap_addr(tx_buffer, dma),
278 dma_unmap_len(tx_buffer, len),
279 DMA_TO_DEVICE);
280 dma_unmap_len_set(tx_buffer, len, 0);
281 }
92915f71
GR
282 }
283
7ad1a093
ET
284 /* move us one more past the eop_desc for start of next pkt */
285 tx_buffer++;
286 tx_desc++;
287 i++;
288 if (unlikely(!i)) {
289 i -= tx_ring->count;
290 tx_buffer = tx_ring->tx_buffer_info;
291 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
292 }
293
294 /* issue prefetch for next Tx descriptor */
295 prefetch(tx_desc);
296
297 /* update budget accounting */
298 budget--;
299 } while (likely(budget));
300
301 i += tx_ring->count;
92915f71 302 tx_ring->next_to_clean = i;
7ad1a093
ET
303 u64_stats_update_begin(&tx_ring->syncp);
304 tx_ring->stats.bytes += total_bytes;
305 tx_ring->stats.packets += total_packets;
306 u64_stats_update_end(&tx_ring->syncp);
307 q_vector->tx.total_bytes += total_bytes;
308 q_vector->tx.total_packets += total_packets;
92915f71
GR
309
310#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
7ad1a093 311 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
f880d07b 312 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
92915f71
GR
313 /* Make sure that anybody stopping the queue after this
314 * sees the new next_to_clean.
315 */
316 smp_mb();
7ad1a093 317
fb40195c
AD
318 if (__netif_subqueue_stopped(tx_ring->netdev,
319 tx_ring->queue_index) &&
92915f71 320 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
fb40195c
AD
321 netif_wake_subqueue(tx_ring->netdev,
322 tx_ring->queue_index);
7ad1a093 323 ++tx_ring->tx_stats.restart_queue;
92915f71 324 }
92915f71
GR
325 }
326
7ad1a093 327 return !!budget;
92915f71
GR
328}
329
08681618
JK
330/**
331 * ixgbevf_rx_skb - Helper function to determine proper Rx method
332 * @q_vector: structure containing interrupt and ring information
333 * @skb: packet to send up
08681618
JK
334 **/
335static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
dff80520 336 struct sk_buff *skb)
08681618 337{
c777cdfa
JK
338#ifdef CONFIG_NET_RX_BUSY_POLL
339 skb_mark_napi_id(skb, &q_vector->napi);
340
341 if (ixgbevf_qv_busy_polling(q_vector)) {
342 netif_receive_skb(skb);
343 /* exit early if we busy polled */
344 return;
345 }
346#endif /* CONFIG_NET_RX_BUSY_POLL */
688ff32d
ET
347
348 napi_gro_receive(&q_vector->napi, skb);
08681618
JK
349}
350
ec62fe26
ET
351/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 352 * @ring: structure containing ring specific data
353 * @rx_desc: current Rx descriptor being processed
92915f71 354 * @skb: skb currently being received and modified
ec62fe26 355 */
55fb277c 356static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
ec62fe26
ET
357 union ixgbe_adv_rx_desc *rx_desc,
358 struct sk_buff *skb)
92915f71 359{
bc8acf2c 360 skb_checksum_none_assert(skb);
92915f71
GR
361
362 /* Rx csum disabled */
fb40195c 363 if (!(ring->netdev->features & NETIF_F_RXCSUM))
92915f71
GR
364 return;
365
366 /* if IP and error */
ec62fe26
ET
367 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
368 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
095e2617 369 ring->rx_stats.csum_err++;
92915f71
GR
370 return;
371 }
372
ec62fe26 373 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
92915f71
GR
374 return;
375
ec62fe26 376 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
095e2617 377 ring->rx_stats.csum_err++;
92915f71
GR
378 return;
379 }
380
381 /* It must be a TCP or UDP packet with a valid checksum */
382 skb->ip_summed = CHECKSUM_UNNECESSARY;
92915f71
GR
383}
384
dff80520
ET
385/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
386 * @rx_ring: rx descriptor ring packet is being transacted on
387 * @rx_desc: pointer to the EOP Rx descriptor
388 * @skb: pointer to current skb being populated
389 *
390 * This function checks the ring, descriptor, and packet information in
391 * order to populate the checksum, VLAN, protocol, and other fields within
392 * the skb.
393 */
394static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
395 union ixgbe_adv_rx_desc *rx_desc,
396 struct sk_buff *skb)
397{
398 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
399
400 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
401 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
402 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
403
404 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
405 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
406 }
407
408 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
409}
410
4b95fe3d
ET
411/**
412 * ixgbevf_is_non_eop - process handling of non-EOP buffers
413 * @rx_ring: Rx ring being processed
414 * @rx_desc: Rx descriptor for current buffer
415 * @skb: current socket buffer containing buffer in progress
416 *
417 * This function updates next to clean. If the buffer is an EOP buffer
418 * this function exits returning false, otherwise it will place the
419 * sk_buff in the next buffer to be chained and return true indicating
420 * that this is in fact a non-EOP buffer.
421 **/
422static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
bad17234 423 union ixgbe_adv_rx_desc *rx_desc)
4b95fe3d
ET
424{
425 u32 ntc = rx_ring->next_to_clean + 1;
426
427 /* fetch, update, and store next to clean */
428 ntc = (ntc < rx_ring->count) ? ntc : 0;
429 rx_ring->next_to_clean = ntc;
430
431 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
432
433 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
434 return false;
435
436 return true;
437}
438
bad17234
ET
439static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
440 struct ixgbevf_rx_buffer *bi)
bafa578f 441{
bad17234 442 struct page *page = bi->page;
bafa578f
ET
443 dma_addr_t dma = bi->dma;
444
bad17234
ET
445 /* since we are recycling buffers we should seldom need to alloc */
446 if (likely(page))
bafa578f
ET
447 return true;
448
bad17234
ET
449 /* alloc new page for storage */
450 page = dev_alloc_page();
451 if (unlikely(!page)) {
452 rx_ring->rx_stats.alloc_rx_page_failed++;
bafa578f
ET
453 return false;
454 }
455
bad17234
ET
456 /* map page for use */
457 dma = dma_map_page(rx_ring->dev, page, 0,
458 PAGE_SIZE, DMA_FROM_DEVICE);
bafa578f
ET
459
460 /* if mapping failed free memory back to system since
461 * there isn't much point in holding memory we can't use
462 */
463 if (dma_mapping_error(rx_ring->dev, dma)) {
bad17234 464 __free_page(page);
bafa578f
ET
465
466 rx_ring->rx_stats.alloc_rx_buff_failed++;
467 return false;
468 }
469
bafa578f 470 bi->dma = dma;
bad17234
ET
471 bi->page = page;
472 bi->page_offset = 0;
bafa578f
ET
473
474 return true;
475}
476
92915f71
GR
477/**
478 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
095e2617 479 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
bafa578f 480 * @cleaned_count: number of buffers to replace
92915f71 481 **/
095e2617 482static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
bafa578f 483 u16 cleaned_count)
92915f71 484{
92915f71
GR
485 union ixgbe_adv_rx_desc *rx_desc;
486 struct ixgbevf_rx_buffer *bi;
fb40195c 487 unsigned int i = rx_ring->next_to_use;
92915f71 488
bafa578f
ET
489 /* nothing to do or no valid netdev defined */
490 if (!cleaned_count || !rx_ring->netdev)
491 return;
b9dd245b 492
bafa578f
ET
493 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
494 bi = &rx_ring->rx_buffer_info[i];
495 i -= rx_ring->count;
05d063aa 496
bafa578f 497 do {
bad17234 498 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
bafa578f 499 break;
b9dd245b 500
bafa578f
ET
501 /* Refresh the desc even if pkt_addr didn't change
502 * because each write-back erases this info.
503 */
bad17234 504 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
92915f71 505
bafa578f
ET
506 rx_desc++;
507 bi++;
92915f71 508 i++;
bafa578f
ET
509 if (unlikely(!i)) {
510 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
511 bi = rx_ring->rx_buffer_info;
512 i -= rx_ring->count;
513 }
514
515 /* clear the hdr_addr for the next_to_use descriptor */
516 rx_desc->read.hdr_addr = 0;
517
518 cleaned_count--;
519 } while (cleaned_count);
520
521 i += rx_ring->count;
92915f71 522
bafa578f
ET
523 if (rx_ring->next_to_use != i) {
524 /* record the next descriptor to use */
525 rx_ring->next_to_use = i;
526
bad17234
ET
527 /* update next to alloc since we have filled the ring */
528 rx_ring->next_to_alloc = i;
529
bafa578f
ET
530 /* Force memory writes to complete before letting h/w
531 * know there are new descriptors to fetch. (Only
532 * applicable for weak-ordered memory model archs,
533 * such as IA-64).
534 */
535 wmb();
536 ixgbevf_write_tail(rx_ring, i);
537 }
92915f71
GR
538}
539
bad17234
ET
540/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
541 * @rx_ring: rx descriptor ring packet is being transacted on
542 * @skb: pointer to current skb being adjusted
543 *
544 * This function is an ixgbevf specific version of __pskb_pull_tail. The
545 * main difference between this version and the original function is that
546 * this function can make several assumptions about the state of things
547 * that allow for significant optimizations versus the standard function.
548 * As a result we can do things like drop a frag and maintain an accurate
549 * truesize for the skb.
550 */
551static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
552 struct sk_buff *skb)
553{
554 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
555 unsigned char *va;
556 unsigned int pull_len;
557
558 /* it is valid to use page_address instead of kmap since we are
 559 * working with pages allocated out of the lowmem pool per
560 * alloc_page(GFP_ATOMIC)
561 */
562 va = skb_frag_address(frag);
563
564 /* we need the header to contain the greater of either ETH_HLEN or
565 * 60 bytes if the skb->len is less than 60 for skb_pad.
566 */
567 pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
568
569 /* align pull length to size of long to optimize memcpy performance */
570 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
571
572 /* update all of the pointers */
573 skb_frag_size_sub(frag, pull_len);
574 frag->page_offset += pull_len;
575 skb->data_len -= pull_len;
576 skb->tail += pull_len;
577}
578
579/* ixgbevf_cleanup_headers - Correct corrupted or empty headers
580 * @rx_ring: rx descriptor ring packet is being transacted on
581 * @rx_desc: pointer to the EOP Rx descriptor
582 * @skb: pointer to current skb being fixed
583 *
584 * Check for corrupted packet headers caused by senders on the local L2
585 * embedded NIC switch not setting up their Tx Descriptors right. These
586 * should be very rare.
587 *
588 * Also address the case where we are pulling data in on pages only
589 * and as such no data is present in the skb header.
590 *
591 * In addition if skb is not at least 60 bytes we need to pad it so that
592 * it is large enough to qualify as a valid Ethernet frame.
593 *
594 * Returns true if an error was encountered and skb was freed.
595 */
596static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
597 union ixgbe_adv_rx_desc *rx_desc,
598 struct sk_buff *skb)
599{
600 /* verify that the packet does not have any known errors */
601 if (unlikely(ixgbevf_test_staterr(rx_desc,
602 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
603 struct net_device *netdev = rx_ring->netdev;
604
605 if (!(netdev->features & NETIF_F_RXALL)) {
606 dev_kfree_skb_any(skb);
607 return true;
608 }
609 }
610
611 /* place header in linear portion of buffer */
612 if (skb_is_nonlinear(skb))
613 ixgbevf_pull_tail(rx_ring, skb);
614
615 /* if skb_pad returns an error the skb was freed */
616 if (unlikely(skb->len < 60)) {
617 int pad_len = 60 - skb->len;
618
619 if (skb_pad(skb, pad_len))
620 return true;
621 __skb_put(skb, pad_len);
622 }
623
624 return false;
625}
626
627/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
628 * @rx_ring: rx descriptor ring to store buffers on
629 * @old_buff: donor buffer to have page reused
630 *
631 * Synchronizes page for reuse by the adapter
632 */
633static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
634 struct ixgbevf_rx_buffer *old_buff)
635{
636 struct ixgbevf_rx_buffer *new_buff;
637 u16 nta = rx_ring->next_to_alloc;
638
639 new_buff = &rx_ring->rx_buffer_info[nta];
640
641 /* update, and store next to alloc */
642 nta++;
643 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
644
645 /* transfer page from old buffer to new buffer */
646 new_buff->page = old_buff->page;
647 new_buff->dma = old_buff->dma;
648 new_buff->page_offset = old_buff->page_offset;
649
650 /* sync the buffer for use by the device */
651 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
652 new_buff->page_offset,
653 IXGBEVF_RX_BUFSZ,
654 DMA_FROM_DEVICE);
655}
656
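/* A page is treated as "reserved" (and therefore not recycled) when it sits
 * on a remote NUMA node or was allocated from the pfmemalloc reserves.
 */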
657static inline bool ixgbevf_page_is_reserved(struct page *page)
658{
659 return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
660}
661
662/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
663 * @rx_ring: rx descriptor ring to transact packets on
664 * @rx_buffer: buffer containing page to add
665 * @rx_desc: descriptor containing length of buffer written by hardware
666 * @skb: sk_buff to place the data into
667 *
668 * This function will add the data contained in rx_buffer->page to the skb.
669 * This is done either through a direct copy if the data in the buffer is
670 * less than the skb header size, otherwise it will just attach the page as
671 * a frag to the skb.
672 *
673 * The function will then update the page offset if necessary and return
674 * true if the buffer can be reused by the adapter.
675 */
676static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
677 struct ixgbevf_rx_buffer *rx_buffer,
678 union ixgbe_adv_rx_desc *rx_desc,
679 struct sk_buff *skb)
680{
681 struct page *page = rx_buffer->page;
682 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
683#if (PAGE_SIZE < 8192)
684 unsigned int truesize = IXGBEVF_RX_BUFSZ;
685#else
686 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
687#endif
688
689 if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
690 unsigned char *va = page_address(page) + rx_buffer->page_offset;
691
692 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
693
694 /* page is not reserved, we can reuse buffer as is */
695 if (likely(!ixgbevf_page_is_reserved(page)))
696 return true;
697
698 /* this page cannot be reused so discard it */
699 put_page(page);
700 return false;
701 }
702
703 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
704 rx_buffer->page_offset, size, truesize);
705
706 /* avoid re-using remote pages */
707 if (unlikely(ixgbevf_page_is_reserved(page)))
708 return false;
709
710#if (PAGE_SIZE < 8192)
711 /* if we are only owner of page we can reuse it */
712 if (unlikely(page_count(page) != 1))
713 return false;
714
715 /* flip page offset to other buffer */
716 rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
717
718#else
719 /* move offset up to the next cache line */
720 rx_buffer->page_offset += truesize;
721
722 if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
723 return false;
724
725#endif
 726 /* Even if we own the page, we are not allowed to use atomic_set().
727 * This would break get_page_unless_zero() users.
728 */
729 atomic_inc(&page->_count);
730
731 return true;
732}
733
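/* ixgbevf_fetch_rx_buffer - pull the current Rx buffer into an skb
 *
 * Syncs the page for CPU access, allocates a new skb if one is not already
 * in progress, copies or attaches the received data, and either recycles the
 * half-page back to the ring or unmaps it if it cannot be reused.
 */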
734static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
735 union ixgbe_adv_rx_desc *rx_desc,
736 struct sk_buff *skb)
737{
738 struct ixgbevf_rx_buffer *rx_buffer;
739 struct page *page;
740
741 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
742 page = rx_buffer->page;
743 prefetchw(page);
744
745 if (likely(!skb)) {
746 void *page_addr = page_address(page) +
747 rx_buffer->page_offset;
748
749 /* prefetch first cache line of first page */
750 prefetch(page_addr);
751#if L1_CACHE_BYTES < 128
752 prefetch(page_addr + L1_CACHE_BYTES);
753#endif
754
755 /* allocate a skb to store the frags */
756 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
757 IXGBEVF_RX_HDR_SIZE);
758 if (unlikely(!skb)) {
759 rx_ring->rx_stats.alloc_rx_buff_failed++;
760 return NULL;
761 }
762
763 /* we will be copying header into skb->data in
764 * pskb_may_pull so it is in our interest to prefetch
765 * it now to avoid a possible cache miss
766 */
767 prefetchw(skb->data);
768 }
769
770 /* we are reusing so sync this buffer for CPU use */
771 dma_sync_single_range_for_cpu(rx_ring->dev,
772 rx_buffer->dma,
773 rx_buffer->page_offset,
774 IXGBEVF_RX_BUFSZ,
775 DMA_FROM_DEVICE);
776
777 /* pull page into skb */
778 if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
779 /* hand second half of page back to the ring */
780 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
781 } else {
782 /* we are not reusing the buffer so unmap it */
783 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
784 PAGE_SIZE, DMA_FROM_DEVICE);
785 }
786
787 /* clear contents of buffer_info */
788 rx_buffer->dma = 0;
789 rx_buffer->page = NULL;
790
791 return skb;
792}
793
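/* ixgbevf_irq_enable_queues - unmask the queue interrupts selected by qmask
 * by writing the mask to the VTEIMS register.
 */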
92915f71 794static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
5f3600eb 795 u32 qmask)
92915f71 796{
92915f71
GR
797 struct ixgbe_hw *hw = &adapter->hw;
798
5f3600eb 799 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
92915f71
GR
800}
801
08e50a20
JK
802static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
803 struct ixgbevf_ring *rx_ring,
804 int budget)
92915f71 805{
92915f71 806 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
bafa578f 807 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
bad17234 808 struct sk_buff *skb = rx_ring->skb;
92915f71 809
6622402a 810 while (likely(total_rx_packets < budget)) {
4b95fe3d 811 union ixgbe_adv_rx_desc *rx_desc;
b97fe3b1 812
0579eefc
ET
813 /* return some buffers to hardware, one at a time is too slow */
814 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
815 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
816 cleaned_count = 0;
817 }
818
bad17234 819 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
0579eefc
ET
820
821 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
92915f71 822 break;
92915f71 823
0579eefc
ET
824 /* This memory barrier is needed to keep us from reading
825 * any other fields out of the rx_desc until we know the
826 * RXD_STAT_DD bit is set
827 */
828 rmb();
ec62fe26 829
bad17234
ET
830 /* retrieve a buffer from the ring */
831 skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
0579eefc 832
bad17234
ET
833 /* exit if we failed to retrieve a buffer */
834 if (!skb)
835 break;
92915f71 836
b97fe3b1
ET
837 cleaned_count++;
838
bad17234
ET
839 /* fetch next buffer in frame if non-eop */
840 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
0579eefc 841 continue;
5c60f81a 842
bad17234
ET
843 /* verify the packet layout is correct */
844 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
845 skb = NULL;
0579eefc 846 continue;
92915f71
GR
847 }
848
92915f71
GR
849 /* probably a little skewed due to removing CRC */
850 total_rx_bytes += skb->len;
92915f71 851
815cccbf
JF
852 /* Workaround hardware that can't do proper VEPA multicast
853 * source pruning.
854 */
bd9d5592
FF
855 if ((skb->pkt_type == PACKET_BROADCAST ||
856 skb->pkt_type == PACKET_MULTICAST) &&
095e2617 857 ether_addr_equal(rx_ring->netdev->dev_addr,
7367d0b5 858 eth_hdr(skb)->h_source)) {
815cccbf 859 dev_kfree_skb_irq(skb);
0579eefc 860 continue;
815cccbf
JF
861 }
862
dff80520
ET
863 /* populate checksum, VLAN, and protocol */
864 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
865
866 ixgbevf_rx_skb(q_vector, skb);
92915f71 867
bad17234
ET
868 /* reset skb pointer */
869 skb = NULL;
870
0579eefc 871 /* update budget accounting */
6622402a
ET
872 total_rx_packets++;
873 }
92915f71 874
bad17234
ET
875 /* place incomplete frames back on ring for completion */
876 rx_ring->skb = skb;
877
4197aa7b 878 u64_stats_update_begin(&rx_ring->syncp);
095e2617
ET
879 rx_ring->stats.packets += total_rx_packets;
880 rx_ring->stats.bytes += total_rx_bytes;
4197aa7b 881 u64_stats_update_end(&rx_ring->syncp);
ac6ed8f0
GR
882 q_vector->rx.total_packets += total_rx_packets;
883 q_vector->rx.total_bytes += total_rx_bytes;
92915f71 884
08e50a20 885 return total_rx_packets;
92915f71
GR
886}
887
888/**
fa71ae27 889 * ixgbevf_poll - NAPI polling callback
92915f71
GR
890 * @napi: napi struct with our devices info in it
891 * @budget: amount of work driver is allowed to do this pass, in packets
892 *
fa71ae27 893 * This function will clean one or more rings associated with a
92915f71
GR
894 * q_vector.
895 **/
fa71ae27 896static int ixgbevf_poll(struct napi_struct *napi, int budget)
92915f71
GR
897{
898 struct ixgbevf_q_vector *q_vector =
899 container_of(napi, struct ixgbevf_q_vector, napi);
900 struct ixgbevf_adapter *adapter = q_vector->adapter;
fa71ae27
AD
901 struct ixgbevf_ring *ring;
902 int per_ring_budget;
903 bool clean_complete = true;
904
905 ixgbevf_for_each_ring(ring, q_vector->tx)
906 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
92915f71 907
c777cdfa
JK
908#ifdef CONFIG_NET_RX_BUSY_POLL
909 if (!ixgbevf_qv_lock_napi(q_vector))
910 return budget;
911#endif
912
92915f71
GR
913 /* attempt to distribute budget to each queue fairly, but don't allow
914 * the budget to go below 1 because we'll exit polling */
fa71ae27
AD
915 if (q_vector->rx.count > 1)
916 per_ring_budget = max(budget/q_vector->rx.count, 1);
917 else
918 per_ring_budget = budget;
919
920 ixgbevf_for_each_ring(ring, q_vector->rx)
08e50a20
JK
921 clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
922 per_ring_budget)
923 < per_ring_budget);
fa71ae27 924
c777cdfa
JK
925#ifdef CONFIG_NET_RX_BUSY_POLL
926 ixgbevf_qv_unlock_napi(q_vector);
927#endif
928
fa71ae27
AD
929 /* If all work not completed, return budget and keep polling */
930 if (!clean_complete)
931 return budget;
932 /* all work done, exit the polling mode */
933 napi_complete(napi);
934 if (adapter->rx_itr_setting & 1)
935 ixgbevf_set_itr(q_vector);
2e7cfbdd
MR
936 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
937 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
fa71ae27
AD
938 ixgbevf_irq_enable_queues(adapter,
939 1 << q_vector->v_idx);
92915f71 940
fa71ae27 941 return 0;
92915f71
GR
942}
943
ce422606
GR
944/**
945 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
946 * @q_vector: structure containing interrupt and ring information
947 */
3849623e 948void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
ce422606
GR
949{
950 struct ixgbevf_adapter *adapter = q_vector->adapter;
951 struct ixgbe_hw *hw = &adapter->hw;
952 int v_idx = q_vector->v_idx;
953 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
954
955 /*
956 * set the WDIS bit to not clear the timer bits and cause an
957 * immediate assertion of the interrupt
958 */
959 itr_reg |= IXGBE_EITR_CNT_WDIS;
960
961 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
962}
92915f71 963
c777cdfa
JK
964#ifdef CONFIG_NET_RX_BUSY_POLL
965/* must be called with local_bh_disable()d */
966static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
967{
968 struct ixgbevf_q_vector *q_vector =
969 container_of(napi, struct ixgbevf_q_vector, napi);
970 struct ixgbevf_adapter *adapter = q_vector->adapter;
971 struct ixgbevf_ring *ring;
972 int found = 0;
973
974 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
975 return LL_FLUSH_FAILED;
976
977 if (!ixgbevf_qv_lock_poll(q_vector))
978 return LL_FLUSH_BUSY;
979
980 ixgbevf_for_each_ring(ring, q_vector->rx) {
981 found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
3b5dca26
JK
982#ifdef BP_EXTENDED_STATS
983 if (found)
095e2617 984 ring->stats.cleaned += found;
3b5dca26 985 else
095e2617 986 ring->stats.misses++;
3b5dca26 987#endif
c777cdfa
JK
988 if (found)
989 break;
990 }
991
992 ixgbevf_qv_unlock_poll(q_vector);
993
994 return found;
995}
996#endif /* CONFIG_NET_RX_BUSY_POLL */
997
92915f71
GR
998/**
999 * ixgbevf_configure_msix - Configure MSI-X hardware
1000 * @adapter: board private structure
1001 *
1002 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
1003 * interrupts.
1004 **/
1005static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1006{
1007 struct ixgbevf_q_vector *q_vector;
6b43c446 1008 int q_vectors, v_idx;
92915f71
GR
1009
1010 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
5f3600eb 1011 adapter->eims_enable_mask = 0;
92915f71
GR
1012
1013 /*
1014 * Populate the IVAR table and set the ITR values to the
1015 * corresponding register.
1016 */
1017 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
6b43c446 1018 struct ixgbevf_ring *ring;
92915f71 1019 q_vector = adapter->q_vector[v_idx];
6b43c446
AD
1020
1021 ixgbevf_for_each_ring(ring, q_vector->rx)
1022 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1023
1024 ixgbevf_for_each_ring(ring, q_vector->tx)
1025 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
92915f71 1026
5f3600eb
AD
1027 if (q_vector->tx.ring && !q_vector->rx.ring) {
1028 /* tx only vector */
1029 if (adapter->tx_itr_setting == 1)
1030 q_vector->itr = IXGBE_10K_ITR;
1031 else
1032 q_vector->itr = adapter->tx_itr_setting;
1033 } else {
1034 /* rx or rx/tx vector */
1035 if (adapter->rx_itr_setting == 1)
1036 q_vector->itr = IXGBE_20K_ITR;
1037 else
1038 q_vector->itr = adapter->rx_itr_setting;
1039 }
1040
1041 /* add q_vector eims value to global eims_enable_mask */
1042 adapter->eims_enable_mask |= 1 << v_idx;
92915f71 1043
5f3600eb 1044 ixgbevf_write_eitr(q_vector);
92915f71
GR
1045 }
1046
1047 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
5f3600eb
AD
1048 /* setup eims_other and add value to global eims_enable_mask */
1049 adapter->eims_other = 1 << v_idx;
1050 adapter->eims_enable_mask |= adapter->eims_other;
92915f71
GR
1051}
1052
1053enum latency_range {
1054 lowest_latency = 0,
1055 low_latency = 1,
1056 bulk_latency = 2,
1057 latency_invalid = 255
1058};
1059
1060/**
1061 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
5f3600eb
AD
1062 * @q_vector: structure containing interrupt and ring information
1063 * @ring_container: structure containing ring performance data
92915f71
GR
1064 *
1065 * Stores a new ITR value based on packets and byte
1066 * counts during the last interrupt. The advantage of per interrupt
1067 * computation is faster updates and more accurate ITR for the current
1068 * traffic pattern. Constants in this function were computed
1069 * based on theoretical maximum wire speed and thresholds were set based
1070 * on testing data as well as attempting to minimize response time
1071 * while increasing bulk throughput.
1072 **/
5f3600eb
AD
1073static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1074 struct ixgbevf_ring_container *ring_container)
92915f71 1075{
5f3600eb
AD
1076 int bytes = ring_container->total_bytes;
1077 int packets = ring_container->total_packets;
92915f71
GR
1078 u32 timepassed_us;
1079 u64 bytes_perint;
5f3600eb 1080 u8 itr_setting = ring_container->itr;
92915f71
GR
1081
1082 if (packets == 0)
5f3600eb 1083 return;
92915f71
GR
1084
 1085 /* simple throttle rate management
1086 * 0-20MB/s lowest (100000 ints/s)
1087 * 20-100MB/s low (20000 ints/s)
1088 * 100-1249MB/s bulk (8000 ints/s)
1089 */
1090 /* what was last interrupt timeslice? */
5f3600eb 1091 timepassed_us = q_vector->itr >> 2;
92915f71
GR
1092 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1093
1094 switch (itr_setting) {
1095 case lowest_latency:
e2c28ce7 1096 if (bytes_perint > 10)
5f3600eb 1097 itr_setting = low_latency;
92915f71
GR
1098 break;
1099 case low_latency:
e2c28ce7 1100 if (bytes_perint > 20)
5f3600eb 1101 itr_setting = bulk_latency;
e2c28ce7 1102 else if (bytes_perint <= 10)
5f3600eb 1103 itr_setting = lowest_latency;
92915f71
GR
1104 break;
1105 case bulk_latency:
e2c28ce7 1106 if (bytes_perint <= 20)
5f3600eb 1107 itr_setting = low_latency;
92915f71
GR
1108 break;
1109 }
1110
5f3600eb
AD
1111 /* clear work counters since we have the values we need */
1112 ring_container->total_bytes = 0;
1113 ring_container->total_packets = 0;
1114
1115 /* write updated itr to ring container */
1116 ring_container->itr = itr_setting;
92915f71
GR
1117}
1118
fa71ae27 1119static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
92915f71 1120{
5f3600eb
AD
1121 u32 new_itr = q_vector->itr;
1122 u8 current_itr;
92915f71 1123
5f3600eb
AD
1124 ixgbevf_update_itr(q_vector, &q_vector->tx);
1125 ixgbevf_update_itr(q_vector, &q_vector->rx);
92915f71 1126
6b43c446 1127 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
92915f71
GR
1128
1129 switch (current_itr) {
1130 /* counts and packets in update_itr are dependent on these numbers */
1131 case lowest_latency:
5f3600eb 1132 new_itr = IXGBE_100K_ITR;
92915f71
GR
1133 break;
1134 case low_latency:
5f3600eb 1135 new_itr = IXGBE_20K_ITR;
92915f71
GR
1136 break;
1137 case bulk_latency:
1138 default:
5f3600eb 1139 new_itr = IXGBE_8K_ITR;
92915f71
GR
1140 break;
1141 }
1142
5f3600eb 1143 if (new_itr != q_vector->itr) {
92915f71 1144 /* do an exponential smoothing */
5f3600eb
AD
1145 new_itr = (10 * new_itr * q_vector->itr) /
1146 ((9 * new_itr) + q_vector->itr);
1147
1148 /* save the algorithm value here */
1149 q_vector->itr = new_itr;
1150
1151 ixgbevf_write_eitr(q_vector);
92915f71 1152 }
92915f71
GR
1153}
1154
4b2cd27f 1155static irqreturn_t ixgbevf_msix_other(int irq, void *data)
92915f71 1156{
fa71ae27 1157 struct ixgbevf_adapter *adapter = data;
92915f71 1158 struct ixgbe_hw *hw = &adapter->hw;
08259594 1159
4b2cd27f 1160 hw->mac.get_link_status = 1;
1e72bfc3 1161
2e7cfbdd
MR
1162 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1163 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
c7bb417d 1164 mod_timer(&adapter->watchdog_timer, jiffies);
3a2c4033 1165
5f3600eb
AD
1166 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1167
92915f71
GR
1168 return IRQ_HANDLED;
1169}
1170
92915f71 1171/**
fa71ae27 1172 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
92915f71
GR
1173 * @irq: unused
1174 * @data: pointer to our q_vector struct for this interrupt vector
1175 **/
fa71ae27 1176static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
92915f71
GR
1177{
1178 struct ixgbevf_q_vector *q_vector = data;
92915f71 1179
5f3600eb 1180 /* EIAM disabled interrupts (on this vector) for us */
fa71ae27
AD
1181 if (q_vector->rx.ring || q_vector->tx.ring)
1182 napi_schedule(&q_vector->napi);
92915f71
GR
1183
1184 return IRQ_HANDLED;
1185}
1186
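/* map_vector_to_rxq - attach Rx ring r_idx to MSI-X vector v_idx; the ring is
 * pushed onto the vector's Rx ring list and the ring count is bumped.
 * map_vector_to_txq below does the same for Tx rings.
 */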
1187static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1188 int r_idx)
1189{
1190 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1191
87e70ab9
DS
1192 a->rx_ring[r_idx]->next = q_vector->rx.ring;
1193 q_vector->rx.ring = a->rx_ring[r_idx];
6b43c446 1194 q_vector->rx.count++;
92915f71
GR
1195}
1196
1197static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1198 int t_idx)
1199{
1200 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1201
87e70ab9
DS
1202 a->tx_ring[t_idx]->next = q_vector->tx.ring;
1203 q_vector->tx.ring = a->tx_ring[t_idx];
6b43c446 1204 q_vector->tx.count++;
92915f71
GR
1205}
1206
1207/**
1208 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1209 * @adapter: board private structure to initialize
1210 *
1211 * This function maps descriptor rings to the queue-specific vectors
1212 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1213 * one vector per ring/queue, but on a constrained vector budget, we
1214 * group the rings as "efficiently" as possible. You would add new
1215 * mapping configurations in here.
1216 **/
1217static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1218{
1219 int q_vectors;
1220 int v_start = 0;
1221 int rxr_idx = 0, txr_idx = 0;
1222 int rxr_remaining = adapter->num_rx_queues;
1223 int txr_remaining = adapter->num_tx_queues;
1224 int i, j;
1225 int rqpv, tqpv;
1226 int err = 0;
1227
1228 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1229
1230 /*
1231 * The ideal configuration...
1232 * We have enough vectors to map one per queue.
1233 */
1234 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1235 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1236 map_vector_to_rxq(adapter, v_start, rxr_idx);
1237
1238 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1239 map_vector_to_txq(adapter, v_start, txr_idx);
1240 goto out;
1241 }
1242
1243 /*
1244 * If we don't have enough vectors for a 1-to-1
1245 * mapping, we'll have to group them so there are
1246 * multiple queues per vector.
1247 */
1248 /* Re-adjusting *qpv takes care of the remainder. */
1249 for (i = v_start; i < q_vectors; i++) {
1250 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1251 for (j = 0; j < rqpv; j++) {
1252 map_vector_to_rxq(adapter, i, rxr_idx);
1253 rxr_idx++;
1254 rxr_remaining--;
1255 }
1256 }
1257 for (i = v_start; i < q_vectors; i++) {
1258 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1259 for (j = 0; j < tqpv; j++) {
1260 map_vector_to_txq(adapter, i, txr_idx);
1261 txr_idx++;
1262 txr_remaining--;
1263 }
1264 }
1265
1266out:
1267 return err;
1268}
1269
1270/**
1271 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1272 * @adapter: board private structure
1273 *
1274 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1275 * interrupts from the kernel.
1276 **/
1277static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1278{
1279 struct net_device *netdev = adapter->netdev;
fa71ae27
AD
1280 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1281 int vector, err;
92915f71
GR
1282 int ri = 0, ti = 0;
1283
92915f71 1284 for (vector = 0; vector < q_vectors; vector++) {
fa71ae27
AD
1285 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1286 struct msix_entry *entry = &adapter->msix_entries[vector];
1287
1288 if (q_vector->tx.ring && q_vector->rx.ring) {
1289 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1290 "%s-%s-%d", netdev->name, "TxRx", ri++);
1291 ti++;
1292 } else if (q_vector->rx.ring) {
1293 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1294 "%s-%s-%d", netdev->name, "rx", ri++);
1295 } else if (q_vector->tx.ring) {
1296 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1297 "%s-%s-%d", netdev->name, "tx", ti++);
92915f71
GR
1298 } else {
1299 /* skip this unused q_vector */
1300 continue;
1301 }
fa71ae27
AD
1302 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1303 q_vector->name, q_vector);
92915f71
GR
1304 if (err) {
1305 hw_dbg(&adapter->hw,
1306 "request_irq failed for MSIX interrupt "
1307 "Error: %d\n", err);
1308 goto free_queue_irqs;
1309 }
1310 }
1311
92915f71 1312 err = request_irq(adapter->msix_entries[vector].vector,
4b2cd27f 1313 &ixgbevf_msix_other, 0, netdev->name, adapter);
92915f71
GR
1314 if (err) {
1315 hw_dbg(&adapter->hw,
4b2cd27f 1316 "request_irq for msix_other failed: %d\n", err);
92915f71
GR
1317 goto free_queue_irqs;
1318 }
1319
1320 return 0;
1321
1322free_queue_irqs:
fa71ae27
AD
1323 while (vector) {
1324 vector--;
1325 free_irq(adapter->msix_entries[vector].vector,
1326 adapter->q_vector[vector]);
1327 }
a1f6c6b1 1328 /* This failure is non-recoverable - it indicates the system is
1329 * out of MSIX vector resources and the VF driver cannot run
1330 * without them. Set the number of msix vectors to zero
1331 * indicating that not enough can be allocated. The error
1332 * will be returned to the user indicating device open failed.
1333 * Any further attempts to force the driver to open will also
1334 * fail. The only way to recover is to unload the driver and
1335 * reload it again. If the system has recovered some MSIX
1336 * vectors then it may succeed.
1337 */
1338 adapter->num_msix_vectors = 0;
92915f71
GR
1339 return err;
1340}
1341
1342static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1343{
1344 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1345
1346 for (i = 0; i < q_vectors; i++) {
1347 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
6b43c446
AD
1348 q_vector->rx.ring = NULL;
1349 q_vector->tx.ring = NULL;
1350 q_vector->rx.count = 0;
1351 q_vector->tx.count = 0;
92915f71
GR
1352 }
1353}
1354
1355/**
1356 * ixgbevf_request_irq - initialize interrupts
1357 * @adapter: board private structure
1358 *
1359 * Attempts to configure interrupts using the best available
1360 * capabilities of the hardware and kernel.
1361 **/
1362static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1363{
1364 int err = 0;
1365
1366 err = ixgbevf_request_msix_irqs(adapter);
1367
1368 if (err)
1369 hw_dbg(&adapter->hw,
1370 "request_irq failed, Error %d\n", err);
1371
1372 return err;
1373}
1374
1375static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1376{
92915f71
GR
1377 int i, q_vectors;
1378
1379 q_vectors = adapter->num_msix_vectors;
92915f71
GR
1380 i = q_vectors - 1;
1381
fa71ae27 1382 free_irq(adapter->msix_entries[i].vector, adapter);
92915f71
GR
1383 i--;
1384
1385 for (; i >= 0; i--) {
fa71ae27
AD
1386 /* free only the irqs that were actually requested */
1387 if (!adapter->q_vector[i]->rx.ring &&
1388 !adapter->q_vector[i]->tx.ring)
1389 continue;
1390
92915f71
GR
1391 free_irq(adapter->msix_entries[i].vector,
1392 adapter->q_vector[i]);
1393 }
1394
1395 ixgbevf_reset_q_vectors(adapter);
1396}
1397
1398/**
1399 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1400 * @adapter: board private structure
1401 **/
1402static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1403{
92915f71 1404 struct ixgbe_hw *hw = &adapter->hw;
5f3600eb 1405 int i;
92915f71 1406
5f3600eb 1407 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
92915f71 1408 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
5f3600eb 1409 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
92915f71
GR
1410
1411 IXGBE_WRITE_FLUSH(hw);
1412
1413 for (i = 0; i < adapter->num_msix_vectors; i++)
1414 synchronize_irq(adapter->msix_entries[i].vector);
1415}
1416
1417/**
1418 * ixgbevf_irq_enable - Enable default interrupt generation settings
1419 * @adapter: board private structure
1420 **/
5f3600eb 1421static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
92915f71
GR
1422{
1423 struct ixgbe_hw *hw = &adapter->hw;
92915f71 1424
5f3600eb
AD
1425 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1426 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1427 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
92915f71
GR
1428}
1429
de02decb
DS
1430/**
1431 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1432 * @adapter: board private structure
1433 * @ring: structure containing ring specific data
1434 *
1435 * Configure the Tx descriptor ring after a reset.
1436 **/
1437static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1438 struct ixgbevf_ring *ring)
1439{
1440 struct ixgbe_hw *hw = &adapter->hw;
1441 u64 tdba = ring->dma;
1442 int wait_loop = 10;
1443 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1444 u8 reg_idx = ring->reg_idx;
1445
1446 /* disable queue to avoid issues while updating state */
1447 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1448 IXGBE_WRITE_FLUSH(hw);
1449
1450 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1451 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1452 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1453 ring->count * sizeof(union ixgbe_adv_tx_desc));
1454
1455 /* disable head writeback */
1456 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1457 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1458
1459 /* enable relaxed ordering */
1460 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1461 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1462 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1463
1464 /* reset head and tail pointers */
1465 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1466 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
dbf8b0d8 1467 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
de02decb
DS
1468
 1469 /* reset ntu and ntc to place SW in sync with hardware */
1470 ring->next_to_clean = 0;
1471 ring->next_to_use = 0;
1472
 1473 /* In order to avoid issues, WTHRESH + PTHRESH should always be equal
 1474 * to or less than the number of on-chip descriptors, which is
1475 * currently 40.
1476 */
1477 txdctl |= (8 << 16); /* WTHRESH = 8 */
1478
 1479 /* Setting PTHRESH to 32 improves performance */
1480 txdctl |= (1 << 8) | /* HTHRESH = 1 */
1481 32; /* PTHRESH = 32 */
1482
1483 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1484
1485 /* poll to verify queue is enabled */
1486 do {
1487 usleep_range(1000, 2000);
1488 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1489 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1490 if (!wait_loop)
1491 pr_err("Could not enable Tx Queue %d\n", reg_idx);
1492}
1493
92915f71
GR
1494/**
1495 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1496 * @adapter: board private structure
1497 *
1498 * Configure the Tx unit of the MAC after a reset.
1499 **/
1500static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1501{
de02decb 1502 u32 i;
92915f71
GR
1503
1504 /* Setup the HW Tx Head and Tail descriptor pointers */
de02decb
DS
1505 for (i = 0; i < adapter->num_tx_queues; i++)
1506 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
92915f71
GR
1507}
1508
1509#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1510
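/* ixgbevf_configure_srrctl - program the split receive control register for
 * the given queue index: enable packet drop on ring full, set the header and
 * packet buffer sizes, and select the advanced one-buffer descriptor format.
 */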
1511static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1512{
92915f71
GR
1513 struct ixgbe_hw *hw = &adapter->hw;
1514 u32 srrctl;
1515
92915f71
GR
1516 srrctl = IXGBE_SRRCTL_DROP_EN;
1517
bad17234
ET
1518 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1519 srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
77d5dfca 1520 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
92915f71 1521
92915f71
GR
1522 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1523}
1524
1bb9c639
DS
1525static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1526{
1527 struct ixgbe_hw *hw = &adapter->hw;
1528
1529 /* PSRTYPE must be initialized in 82599 */
1530 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1531 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1532 IXGBE_PSRTYPE_L2HDR;
1533
1534 if (adapter->num_rx_queues > 1)
1535 psrtype |= 1 << 29;
1536
1537 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1538}
1539
de02decb
DS
1540#define IXGBEVF_MAX_RX_DESC_POLL 10
1541static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1542 struct ixgbevf_ring *ring)
1543{
1544 struct ixgbe_hw *hw = &adapter->hw;
1545 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1546 u32 rxdctl;
1547 u8 reg_idx = ring->reg_idx;
1548
26597802
MR
1549 if (IXGBE_REMOVED(hw->hw_addr))
1550 return;
de02decb
DS
1551 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1552 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1553
1554 /* write value back with RXDCTL.ENABLE bit cleared */
1555 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1556
1557 /* the hardware may take up to 100us to really disable the rx queue */
1558 do {
1559 udelay(10);
1560 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1561 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1562
1563 if (!wait_loop)
1564 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1565 reg_idx);
1566}
1567
1568static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1569 struct ixgbevf_ring *ring)
1570{
1571 struct ixgbe_hw *hw = &adapter->hw;
1572 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1573 u32 rxdctl;
1574 u8 reg_idx = ring->reg_idx;
1575
26597802
MR
1576 if (IXGBE_REMOVED(hw->hw_addr))
1577 return;
de02decb
DS
1578 do {
1579 usleep_range(1000, 2000);
1580 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1581 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1582
1583 if (!wait_loop)
1584 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1585 reg_idx);
1586}
1587
1588static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1589 struct ixgbevf_ring *ring)
1590{
1591 struct ixgbe_hw *hw = &adapter->hw;
1592 u64 rdba = ring->dma;
1593 u32 rxdctl;
1594 u8 reg_idx = ring->reg_idx;
1595
1596 /* disable queue to avoid issues while updating state */
1597 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1598 ixgbevf_disable_rx_queue(adapter, ring);
1599
1600 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1601 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1602 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1603 ring->count * sizeof(union ixgbe_adv_rx_desc));
1604
1605 /* enable relaxed ordering */
1606 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1607 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1608
1609 /* reset head and tail pointers */
1610 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1611 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
dbf8b0d8 1612 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
de02decb
DS
1613
 1614 /* reset ntu and ntc to place SW in sync with hardware */
1615 ring->next_to_clean = 0;
1616 ring->next_to_use = 0;
bad17234 1617 ring->next_to_alloc = 0;
de02decb
DS
1618
1619 ixgbevf_configure_srrctl(adapter, reg_idx);
1620
bad17234
ET
1621 /* allow any size packet since we can handle overflow */
1622 rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
1623
de02decb
DS
1624 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1625 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1626
1627 ixgbevf_rx_desc_queue_enable(adapter, ring);
095e2617 1628 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
de02decb
DS
1629}
1630
92915f71
GR
1631/**
1632 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1633 * @adapter: board private structure
1634 *
1635 * Configure the Rx unit of the MAC after a reset.
1636 **/
1637static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1638{
de02decb 1639 int i;
bad17234
ET
1640 struct ixgbe_hw *hw = &adapter->hw;
1641 struct net_device *netdev = adapter->netdev;
92915f71 1642
1bb9c639 1643 ixgbevf_setup_psrtype(adapter);
dd1fe113 1644
bad17234
ET
1645 /* notify the PF of our intent to use this size of frame */
1646 ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
92915f71 1647
92915f71
GR
1648 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1649 * the Base and Length of the Rx Descriptor Ring */
de02decb
DS
1650 for (i = 0; i < adapter->num_rx_queues; i++)
1651 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
92915f71
GR
1652}
1653
80d5c368
PM
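/* ixgbevf_vlan_rx_add_vid - ask the PF (via the mailbox) to add a VLAN filter
 * for this VF and record the VID in the adapter's active_vlans bitmap.
 */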
1654static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1655 __be16 proto, u16 vid)
92915f71
GR
1656{
1657 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1658 struct ixgbe_hw *hw = &adapter->hw;
2ddc7fe1
AD
1659 int err;
1660
55fdd45b 1661 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 1662
92915f71 1663 /* add VID to filter table */
2ddc7fe1 1664 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1c55ed76 1665
55fdd45b 1666 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 1667
2ddc7fe1
AD
1668 /* translate error return types so error makes sense */
1669 if (err == IXGBE_ERR_MBX)
1670 return -EIO;
1671
1672 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1673 return -EACCES;
1674
dadcd65f 1675 set_bit(vid, adapter->active_vlans);
8e586137 1676
2ddc7fe1 1677 return err;
92915f71
GR
1678}
1679
80d5c368
PM
1680static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1681 __be16 proto, u16 vid)
92915f71
GR
1682{
1683 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1684 struct ixgbe_hw *hw = &adapter->hw;
2ddc7fe1 1685 int err = -EOPNOTSUPP;
92915f71 1686
55fdd45b 1687 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 1688
92915f71 1689 /* remove VID from filter table */
92fe0bf7 1690 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1c55ed76 1691
55fdd45b 1692 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 1693
dadcd65f 1694 clear_bit(vid, adapter->active_vlans);
8e586137 1695
2ddc7fe1 1696 return err;
92915f71
GR
1697}
1698
1699static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1700{
dadcd65f 1701 u16 vid;
92915f71 1702
dadcd65f 1703 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
80d5c368
PM
1704 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1705 htons(ETH_P_8021Q), vid);
92915f71
GR
1706}
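
After a reset the VLAN filter table on the PF side is empty, so ixgbevf_restore_vlan() replays every VID recorded in the adapter's active_vlans bitmap. A user-space sketch of that walk, open-coding the kernel's for_each_set_bit() over an array of unsigned longs:

    #include <limits.h>
    #include <stdio.h>

    #define N_VID         4096  /* VLAN_N_VID */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define WORDS         ((N_VID + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static unsigned long active_vlans[WORDS];

    static void set_vid(int vid)
    {
        active_vlans[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
    }

    int main(void)
    {
        set_vid(1);
        set_vid(100);
        set_vid(4094);

        /* equivalent of for_each_set_bit(vid, active_vlans, VLAN_N_VID) */
        for (int vid = 0; vid < N_VID; vid++)
            if (active_vlans[vid / BITS_PER_LONG] &
                (1UL << (vid % BITS_PER_LONG)))
                printf("re-adding VID %d\n", vid);
        return 0;
    }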
1707
46ec20ff
GR
1708static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1709{
1710 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1711 struct ixgbe_hw *hw = &adapter->hw;
1712 int count = 0;
1713
 1714	if (netdev_uc_count(netdev) > 10) {
dbd9636e 1715 pr_err("Too many unicast filters - No Space\n");
46ec20ff
GR
1716 return -ENOSPC;
1717 }
1718
1719 if (!netdev_uc_empty(netdev)) {
1720 struct netdev_hw_addr *ha;
1721 netdev_for_each_uc_addr(ha, netdev) {
1722 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1723 udelay(200);
1724 }
1725 } else {
1726 /*
1727 * If the list is empty then send message to PF driver to
1728 * clear all macvlans on this VF.
1729 */
1730 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1731 }
1732
1733 return count;
1734}
1735
92915f71 1736/**
dee847f5 1737 * ixgbevf_set_rx_mode - Multicast and unicast set
92915f71
GR
1738 * @netdev: network interface device structure
1739 *
 1740 * The set_rx_mode entry point is called whenever the multicast address
dee847f5
GR
1741 * list, unicast address list or the network interface flags are updated.
1742 * This routine is responsible for configuring the hardware for proper
1743 * multicast mode and configuring requested unicast filters.
92915f71
GR
1744 **/
1745static void ixgbevf_set_rx_mode(struct net_device *netdev)
1746{
1747 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1748 struct ixgbe_hw *hw = &adapter->hw;
92915f71 1749
55fdd45b 1750 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 1751
92915f71 1752 /* reprogram multicast list */
92fe0bf7 1753 hw->mac.ops.update_mc_addr_list(hw, netdev);
46ec20ff
GR
1754
1755 ixgbevf_write_uc_addr_list(netdev);
1c55ed76 1756
55fdd45b 1757 spin_unlock_bh(&adapter->mbx_lock);
92915f71
GR
1758}
1759
1760static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1761{
1762 int q_idx;
1763 struct ixgbevf_q_vector *q_vector;
1764 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1765
1766 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
92915f71 1767 q_vector = adapter->q_vector[q_idx];
c777cdfa
JK
1768#ifdef CONFIG_NET_RX_BUSY_POLL
1769 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1770#endif
fa71ae27 1771 napi_enable(&q_vector->napi);
92915f71
GR
1772 }
1773}
1774
1775static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1776{
1777 int q_idx;
1778 struct ixgbevf_q_vector *q_vector;
1779 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1780
1781 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1782 q_vector = adapter->q_vector[q_idx];
92915f71 1783 napi_disable(&q_vector->napi);
c777cdfa
JK
1784#ifdef CONFIG_NET_RX_BUSY_POLL
1785 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1786 pr_info("QV %d locked\n", q_idx);
1787 usleep_range(1000, 20000);
1788 }
1789#endif /* CONFIG_NET_RX_BUSY_POLL */
92915f71
GR
1790 }
1791}
1792
220fe050
DS
1793static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1794{
1795 struct ixgbe_hw *hw = &adapter->hw;
1796 unsigned int def_q = 0;
1797 unsigned int num_tcs = 0;
1798 unsigned int num_rx_queues = 1;
1799 int err;
1800
1801 spin_lock_bh(&adapter->mbx_lock);
1802
1803 /* fetch queue configuration from the PF */
1804 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1805
1806 spin_unlock_bh(&adapter->mbx_lock);
1807
1808 if (err)
1809 return err;
1810
1811 if (num_tcs > 1) {
1812 /* update default Tx ring register index */
87e70ab9 1813 adapter->tx_ring[0]->reg_idx = def_q;
220fe050
DS
1814
1815 /* we need as many queues as traffic classes */
1816 num_rx_queues = num_tcs;
1817 }
1818
1819 /* if we have a bad config abort request queue reset */
1820 if (adapter->num_rx_queues != num_rx_queues) {
1821 /* force mailbox timeout to prevent further messages */
1822 hw->mbx.timeout = 0;
1823
1824 /* wait for watchdog to come around and bail us out */
1825 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1826 }
1827
1828 return 0;
1829}
1830
92915f71
GR
1831static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1832{
220fe050
DS
1833 ixgbevf_configure_dcb(adapter);
1834
de02decb 1835 ixgbevf_set_rx_mode(adapter->netdev);
92915f71
GR
1836
1837 ixgbevf_restore_vlan(adapter);
1838
1839 ixgbevf_configure_tx(adapter);
1840 ixgbevf_configure_rx(adapter);
92915f71
GR
1841}
1842
33bd9f60
GR
1843static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1844{
1845 /* Only save pre-reset stats if there are some */
1846 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1847 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1848 adapter->stats.base_vfgprc;
1849 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1850 adapter->stats.base_vfgptc;
1851 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1852 adapter->stats.base_vfgorc;
1853 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1854 adapter->stats.base_vfgotc;
1855 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1856 adapter->stats.base_vfmprc;
1857 }
1858}
1859
1860static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1861{
1862 struct ixgbe_hw *hw = &adapter->hw;
1863
1864 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1865 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1866 adapter->stats.last_vfgorc |=
1867 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1868 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1869 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1870 adapter->stats.last_vfgotc |=
1871 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1872 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1873
1874 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1875 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1876 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1877 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1878 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1879}
1880
31186785
AD
1881static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1882{
1883 struct ixgbe_hw *hw = &adapter->hw;
56e94095
AD
1884 int api[] = { ixgbe_mbox_api_11,
1885 ixgbe_mbox_api_10,
31186785
AD
1886 ixgbe_mbox_api_unknown };
1887 int err = 0, idx = 0;
1888
55fdd45b 1889 spin_lock_bh(&adapter->mbx_lock);
31186785
AD
1890
1891 while (api[idx] != ixgbe_mbox_api_unknown) {
1892 err = ixgbevf_negotiate_api_version(hw, api[idx]);
1893 if (!err)
1894 break;
1895 idx++;
1896 }
1897
55fdd45b 1898 spin_unlock_bh(&adapter->mbx_lock);
31186785
AD
1899}
1900
795180d8 1901static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
92915f71
GR
1902{
1903 struct net_device *netdev = adapter->netdev;
1904 struct ixgbe_hw *hw = &adapter->hw;
92915f71
GR
1905
1906 ixgbevf_configure_msix(adapter);
1907
55fdd45b 1908 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 1909
92fe0bf7
GR
1910 if (is_valid_ether_addr(hw->mac.addr))
1911 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1912 else
1913 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
92915f71 1914
55fdd45b 1915 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 1916
4e857c58 1917 smp_mb__before_atomic();
92915f71
GR
1918 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1919 ixgbevf_napi_enable_all(adapter);
1920
1921 /* enable transmits */
1922 netif_tx_start_all_queues(netdev);
1923
33bd9f60
GR
1924 ixgbevf_save_reset_stats(adapter);
1925 ixgbevf_init_last_counter_stats(adapter);
1926
4b2cd27f 1927 hw->mac.get_link_status = 1;
92915f71 1928 mod_timer(&adapter->watchdog_timer, jiffies);
92915f71
GR
1929}
1930
795180d8 1931void ixgbevf_up(struct ixgbevf_adapter *adapter)
92915f71 1932{
92915f71
GR
1933 struct ixgbe_hw *hw = &adapter->hw;
1934
1935 ixgbevf_configure(adapter);
1936
795180d8 1937 ixgbevf_up_complete(adapter);
92915f71
GR
1938
1939 /* clear any pending interrupts, may auto mask */
1940 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1941
5f3600eb 1942 ixgbevf_irq_enable(adapter);
92915f71
GR
1943}
1944
1945/**
1946 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
92915f71
GR
1947 * @rx_ring: ring to free buffers from
1948 **/
05d063aa 1949static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
92915f71 1950{
bad17234 1951 struct device *dev = rx_ring->dev;
92915f71
GR
1952 unsigned long size;
1953 unsigned int i;
1954
bad17234
ET
1955 /* Free Rx ring sk_buff */
1956 if (rx_ring->skb) {
1957 dev_kfree_skb(rx_ring->skb);
1958 rx_ring->skb = NULL;
1959 }
1960
1961 /* ring already cleared, nothing to do */
c0456c23
GR
1962 if (!rx_ring->rx_buffer_info)
1963 return;
92915f71 1964
bad17234 1965 /* Free all the Rx ring pages */
92915f71 1966 for (i = 0; i < rx_ring->count; i++) {
bad17234 1967 struct ixgbevf_rx_buffer *rx_buffer;
92915f71 1968
bad17234
ET
1969 rx_buffer = &rx_ring->rx_buffer_info[i];
1970 if (rx_buffer->dma)
1971 dma_unmap_page(dev, rx_buffer->dma,
1972 PAGE_SIZE, DMA_FROM_DEVICE);
1973 rx_buffer->dma = 0;
1974 if (rx_buffer->page)
1975 __free_page(rx_buffer->page);
1976 rx_buffer->page = NULL;
92915f71
GR
1977 }
1978
1979 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1980 memset(rx_ring->rx_buffer_info, 0, size);
1981
1982 /* Zero out the descriptor ring */
1983 memset(rx_ring->desc, 0, rx_ring->size);
92915f71
GR
1984}
1985
1986/**
1987 * ixgbevf_clean_tx_ring - Free Tx Buffers
92915f71
GR
1988 * @tx_ring: ring to be cleaned
1989 **/
05d063aa 1990static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
92915f71
GR
1991{
1992 struct ixgbevf_tx_buffer *tx_buffer_info;
1993 unsigned long size;
1994 unsigned int i;
1995
c0456c23
GR
1996 if (!tx_ring->tx_buffer_info)
1997 return;
1998
92915f71 1999 /* Free all the Tx ring sk_buffs */
92915f71
GR
2000 for (i = 0; i < tx_ring->count; i++) {
2001 tx_buffer_info = &tx_ring->tx_buffer_info[i];
70a10e25 2002 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
92915f71
GR
2003 }
2004
2005 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2006 memset(tx_ring->tx_buffer_info, 0, size);
2007
2008 memset(tx_ring->desc, 0, tx_ring->size);
92915f71
GR
2009}
2010
2011/**
2012 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2013 * @adapter: board private structure
2014 **/
2015static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2016{
2017 int i;
2018
2019 for (i = 0; i < adapter->num_rx_queues; i++)
05d063aa 2020 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
92915f71
GR
2021}
2022
2023/**
2024 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2025 * @adapter: board private structure
2026 **/
2027static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2028{
2029 int i;
2030
2031 for (i = 0; i < adapter->num_tx_queues; i++)
05d063aa 2032 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
92915f71
GR
2033}
2034
2035void ixgbevf_down(struct ixgbevf_adapter *adapter)
2036{
2037 struct net_device *netdev = adapter->netdev;
2038 struct ixgbe_hw *hw = &adapter->hw;
de02decb 2039 int i;
92915f71
GR
2040
2041 /* signal that we are down to the interrupt handler */
5b346dc9
MR
2042 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2043 return; /* do nothing if already down */
858c3dda
DS
2044
2045 /* disable all enabled rx queues */
2046 for (i = 0; i < adapter->num_rx_queues; i++)
87e70ab9 2047 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
92915f71
GR
2048
2049 netif_tx_disable(netdev);
2050
2051 msleep(10);
2052
2053 netif_tx_stop_all_queues(netdev);
2054
2055 ixgbevf_irq_disable(adapter);
2056
2057 ixgbevf_napi_disable_all(adapter);
2058
2059 del_timer_sync(&adapter->watchdog_timer);
2060 /* can't call flush scheduled work here because it can deadlock
2061 * if linkwatch_event tries to acquire the rtnl_lock which we are
2062 * holding */
2063 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
2064 msleep(1);
2065
2066 /* disable transmits in the hardware now that interrupts are off */
2067 for (i = 0; i < adapter->num_tx_queues; i++) {
de02decb
DS
2068 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2069
2070 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2071 IXGBE_TXDCTL_SWFLSH);
92915f71
GR
2072 }
2073
2074 netif_carrier_off(netdev);
2075
2076 if (!pci_channel_offline(adapter->pdev))
2077 ixgbevf_reset(adapter);
2078
2079 ixgbevf_clean_all_tx_rings(adapter);
2080 ixgbevf_clean_all_rx_rings(adapter);
2081}
2082
2083void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2084{
2085 WARN_ON(in_interrupt());
c0456c23 2086
92915f71
GR
2087 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2088 msleep(1);
2089
4b2cd27f
AD
2090 ixgbevf_down(adapter);
2091 ixgbevf_up(adapter);
92915f71
GR
2092
2093 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2094}
2095
2096void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2097{
2098 struct ixgbe_hw *hw = &adapter->hw;
2099 struct net_device *netdev = adapter->netdev;
2100
798e381a 2101 if (hw->mac.ops.reset_hw(hw)) {
92915f71 2102 hw_dbg(hw, "PF still resetting\n");
798e381a 2103 } else {
92915f71 2104 hw->mac.ops.init_hw(hw);
798e381a
DS
2105 ixgbevf_negotiate_api(adapter);
2106 }
92915f71
GR
2107
2108 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2109 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2110 netdev->addr_len);
2111 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2112 netdev->addr_len);
2113 }
2114}
2115
e45dd5fe
JK
2116static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2117 int vectors)
92915f71 2118{
a5f9337b 2119 int vector_threshold;
92915f71 2120
fa71ae27
AD
2121 /* We'll want at least 2 (vector_threshold):
2122 * 1) TxQ[0] + RxQ[0] handler
2123 * 2) Other (Link Status Change, etc.)
92915f71
GR
2124 */
2125 vector_threshold = MIN_MSIX_COUNT;
2126
2127 /* The more we get, the more we will assign to Tx/Rx Cleanup
2128 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2129 * Right now, we simply care about how many we'll get; we'll
2130 * set them up later while requesting irq's.
2131 */
5c1e3588
AG
2132 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2133 vector_threshold, vectors);
92915f71 2134
5c1e3588 2135 if (vectors < 0) {
e45dd5fe
JK
2136 dev_err(&adapter->pdev->dev,
2137 "Unable to allocate MSI-X interrupts\n");
92915f71
GR
2138 kfree(adapter->msix_entries);
2139 adapter->msix_entries = NULL;
5c1e3588 2140 return vectors;
92915f71 2141 }
dee847f5 2142
5c1e3588
AG
2143 /* Adjust for only the vectors we'll use, which is minimum
2144 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2145 * vectors we were allocated.
2146 */
2147 adapter->num_msix_vectors = vectors;
2148
2149 return 0;
92915f71
GR
2150}
2151
49ce9c2c
BH
2152/**
2153 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
92915f71
GR
2154 * @adapter: board private structure to initialize
2155 *
2156 * This is the top level queue allocation routine. The order here is very
2157 * important, starting with the "most" number of features turned on at once,
2158 * and ending with the smallest set of features. This way large combinations
2159 * can be allocated if they're turned on, and smaller combinations are the
2160 * fallthrough conditions.
2161 *
2162 **/
2163static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2164{
220fe050
DS
2165 struct ixgbe_hw *hw = &adapter->hw;
2166 unsigned int def_q = 0;
2167 unsigned int num_tcs = 0;
2168 int err;
2169
92915f71
GR
2170 /* Start with base case */
2171 adapter->num_rx_queues = 1;
2172 adapter->num_tx_queues = 1;
220fe050
DS
2173
2174 spin_lock_bh(&adapter->mbx_lock);
2175
2176 /* fetch queue configuration from the PF */
2177 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2178
2179 spin_unlock_bh(&adapter->mbx_lock);
2180
2181 if (err)
2182 return;
2183
2184 /* we need as many queues as traffic classes */
2185 if (num_tcs > 1)
2186 adapter->num_rx_queues = num_tcs;
92915f71
GR
2187}
2188
2189/**
2190 * ixgbevf_alloc_queues - Allocate memory for all rings
2191 * @adapter: board private structure to initialize
2192 *
2193 * We allocate one ring per queue at run-time since we don't know the
 2194 * number of queues at compile-time.
2196 **/
2197static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2198{
87e70ab9
DS
2199 struct ixgbevf_ring *ring;
2200 int rx = 0, tx = 0;
92915f71 2201
87e70ab9
DS
2202 for (; tx < adapter->num_tx_queues; tx++) {
2203 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2204 if (!ring)
2205 goto err_allocation;
92915f71 2206
87e70ab9
DS
2207 ring->dev = &adapter->pdev->dev;
2208 ring->netdev = adapter->netdev;
2209 ring->count = adapter->tx_ring_count;
2210 ring->queue_index = tx;
2211 ring->reg_idx = tx;
92915f71 2212
87e70ab9 2213 adapter->tx_ring[tx] = ring;
92915f71
GR
2214 }
2215
87e70ab9
DS
2216 for (; rx < adapter->num_rx_queues; rx++) {
2217 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2218 if (!ring)
2219 goto err_allocation;
2220
2221 ring->dev = &adapter->pdev->dev;
2222 ring->netdev = adapter->netdev;
2223
2224 ring->count = adapter->rx_ring_count;
2225 ring->queue_index = rx;
2226 ring->reg_idx = rx;
2227
2228 adapter->rx_ring[rx] = ring;
92915f71
GR
2229 }
2230
2231 return 0;
2232
87e70ab9
DS
2233err_allocation:
2234 while (tx) {
2235 kfree(adapter->tx_ring[--tx]);
2236 adapter->tx_ring[tx] = NULL;
2237 }
2238
2239 while (rx) {
2240 kfree(adapter->rx_ring[--rx]);
2241 adapter->rx_ring[rx] = NULL;
2242 }
92915f71
GR
2243 return -ENOMEM;
2244}
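
The err_allocation path above is the standard partial-unwind pattern: only the allocations that succeeded are freed, in reverse order, and the pointers are cleared so later teardown sees a consistent state. A compact user-space version of the same pattern:

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM 4

    int main(void)
    {
        int *ring[NUM] = { 0 };
        int i;

        for (i = 0; i < NUM; i++) {
            ring[i] = calloc(1, sizeof(int));
            if (!ring[i])
                goto err_allocation;
        }
        puts("all rings allocated");
        for (i = 0; i < NUM; i++)
            free(ring[i]);
        return 0;

    err_allocation:
        while (i) {             /* unwind only what succeeded */
            free(ring[--i]);
            ring[i] = NULL;
        }
        return 1;               /* the driver returns -ENOMEM here */
    }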
2245
2246/**
2247 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2248 * @adapter: board private structure to initialize
2249 *
2250 * Attempt to configure the interrupts using the best available
2251 * capabilities of the hardware and the kernel.
2252 **/
2253static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2254{
91e2b89b 2255 struct net_device *netdev = adapter->netdev;
92915f71
GR
2256 int err = 0;
2257 int vector, v_budget;
2258
2259 /*
2260 * It's easy to be greedy for MSI-X vectors, but it really
2261 * doesn't do us much good if we have a lot more vectors
 2262	 * than CPUs. So let's be conservative and only ask for
fa71ae27
AD
 2263	 * (roughly) the same number of vectors as there are CPUs.
2264 * The default is to use pairs of vectors.
92915f71 2265 */
fa71ae27
AD
2266 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2267 v_budget = min_t(int, v_budget, num_online_cpus());
2268 v_budget += NON_Q_VECTORS;
92915f71
GR
2269
2270 /* A failure in MSI-X entry allocation isn't fatal, but it does
2271 * mean we disable MSI-X capabilities of the adapter. */
2272 adapter->msix_entries = kcalloc(v_budget,
2273 sizeof(struct msix_entry), GFP_KERNEL);
2274 if (!adapter->msix_entries) {
2275 err = -ENOMEM;
2276 goto out;
2277 }
2278
2279 for (vector = 0; vector < v_budget; vector++)
2280 adapter->msix_entries[vector].entry = vector;
2281
e45dd5fe
JK
2282 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2283 if (err)
2284 goto out;
92915f71 2285
91e2b89b
GR
2286 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2287 if (err)
2288 goto out;
2289
2290 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2291
92915f71
GR
2292out:
2293 return err;
2294}
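
The budget arithmetic deserves a note: one MSI-X vector can service a Tx/Rx pair, so the request is the larger of the two queue counts, capped at the number of online CPUs, plus the non-queue (mailbox/link) vector. A sketch with example numbers; NON_Q_VECTORS being 1 is an assumption here.

    #include <stdio.h>

    #define NON_Q_VECTORS 1 /* assumed: one extra vector for mailbox/other */

    static int min_int(int a, int b) { return a < b ? a : b; }
    static int max_int(int a, int b) { return a > b ? a : b; }

    int main(void)
    {
        int num_rx = 4, num_tx = 2, online_cpus = 8;    /* example values */
        int v_budget = max_int(num_rx, num_tx);

        v_budget = min_int(v_budget, online_cpus);
        v_budget += NON_Q_VECTORS;

        printf("requesting %d MSI-X vectors\n", v_budget); /* 5 */
        return 0;
    }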
2295
2296/**
2297 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2298 * @adapter: board private structure to initialize
2299 *
2300 * We allocate one q_vector per queue interrupt. If allocation fails we
2301 * return -ENOMEM.
2302 **/
2303static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2304{
2305 int q_idx, num_q_vectors;
2306 struct ixgbevf_q_vector *q_vector;
92915f71
GR
2307
2308 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
92915f71
GR
2309
2310 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2311 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2312 if (!q_vector)
2313 goto err_out;
2314 q_vector->adapter = adapter;
2315 q_vector->v_idx = q_idx;
fa71ae27
AD
2316 netif_napi_add(adapter->netdev, &q_vector->napi,
2317 ixgbevf_poll, 64);
c777cdfa
JK
2318#ifdef CONFIG_NET_RX_BUSY_POLL
2319 napi_hash_add(&q_vector->napi);
2320#endif
92915f71
GR
2321 adapter->q_vector[q_idx] = q_vector;
2322 }
2323
2324 return 0;
2325
2326err_out:
2327 while (q_idx) {
2328 q_idx--;
2329 q_vector = adapter->q_vector[q_idx];
c777cdfa
JK
2330#ifdef CONFIG_NET_RX_BUSY_POLL
2331 napi_hash_del(&q_vector->napi);
2332#endif
92915f71
GR
2333 netif_napi_del(&q_vector->napi);
2334 kfree(q_vector);
2335 adapter->q_vector[q_idx] = NULL;
2336 }
2337 return -ENOMEM;
2338}
2339
2340/**
2341 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2342 * @adapter: board private structure to initialize
2343 *
2344 * This function frees the memory allocated to the q_vectors. In addition if
2345 * NAPI is enabled it will delete any references to the NAPI struct prior
2346 * to freeing the q_vector.
2347 **/
2348static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2349{
f4477702 2350 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
92915f71
GR
2351
2352 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2353 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2354
2355 adapter->q_vector[q_idx] = NULL;
c777cdfa
JK
2356#ifdef CONFIG_NET_RX_BUSY_POLL
2357 napi_hash_del(&q_vector->napi);
2358#endif
f4477702 2359 netif_napi_del(&q_vector->napi);
92915f71
GR
2360 kfree(q_vector);
2361 }
2362}
2363
2364/**
2365 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2366 * @adapter: board private structure
2367 *
2368 **/
2369static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2370{
2371 pci_disable_msix(adapter->pdev);
2372 kfree(adapter->msix_entries);
2373 adapter->msix_entries = NULL;
92915f71
GR
2374}
2375
2376/**
2377 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2378 * @adapter: board private structure to initialize
2379 *
2380 **/
2381static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2382{
2383 int err;
2384
2385 /* Number of supported queues */
2386 ixgbevf_set_num_queues(adapter);
2387
2388 err = ixgbevf_set_interrupt_capability(adapter);
2389 if (err) {
2390 hw_dbg(&adapter->hw,
2391 "Unable to setup interrupt capabilities\n");
2392 goto err_set_interrupt;
2393 }
2394
2395 err = ixgbevf_alloc_q_vectors(adapter);
2396 if (err) {
 2397		hw_dbg(&adapter->hw,
 2398		       "Unable to allocate memory for queue vectors\n");
2399 goto err_alloc_q_vectors;
2400 }
2401
2402 err = ixgbevf_alloc_queues(adapter);
2403 if (err) {
dbd9636e 2404 pr_err("Unable to allocate memory for queues\n");
92915f71
GR
2405 goto err_alloc_queues;
2406 }
2407
 2408	hw_dbg(&adapter->hw,
 2409	       "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
 2410	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
 2411	       adapter->num_rx_queues, adapter->num_tx_queues);
2412
2413 set_bit(__IXGBEVF_DOWN, &adapter->state);
2414
2415 return 0;
2416err_alloc_queues:
2417 ixgbevf_free_q_vectors(adapter);
2418err_alloc_q_vectors:
2419 ixgbevf_reset_interrupt_capability(adapter);
2420err_set_interrupt:
2421 return err;
2422}
2423
0ac1e8ce
AD
2424/**
2425 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2426 * @adapter: board private structure to clear interrupt scheme on
2427 *
2428 * We go through and clear interrupt specific resources and reset the structure
2429 * to pre-load conditions
2430 **/
2431static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2432{
87e70ab9
DS
2433 int i;
2434
2435 for (i = 0; i < adapter->num_tx_queues; i++) {
2436 kfree(adapter->tx_ring[i]);
2437 adapter->tx_ring[i] = NULL;
2438 }
2439 for (i = 0; i < adapter->num_rx_queues; i++) {
2440 kfree(adapter->rx_ring[i]);
2441 adapter->rx_ring[i] = NULL;
2442 }
2443
0ac1e8ce
AD
2444 adapter->num_tx_queues = 0;
2445 adapter->num_rx_queues = 0;
2446
2447 ixgbevf_free_q_vectors(adapter);
2448 ixgbevf_reset_interrupt_capability(adapter);
2449}
2450
92915f71
GR
2451/**
2452 * ixgbevf_sw_init - Initialize general software structures
2453 * (struct ixgbevf_adapter)
2454 * @adapter: board private structure to initialize
2455 *
2456 * ixgbevf_sw_init initializes the Adapter private data structure.
2457 * Fields are initialized based on PCI device information and
2458 * OS network device settings (MTU size).
2459 **/
9f9a12f8 2460static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
92915f71
GR
2461{
2462 struct ixgbe_hw *hw = &adapter->hw;
2463 struct pci_dev *pdev = adapter->pdev;
e1941a74 2464 struct net_device *netdev = adapter->netdev;
92915f71
GR
2465 int err;
2466
2467 /* PCI config space info */
2468
2469 hw->vendor_id = pdev->vendor;
2470 hw->device_id = pdev->device;
ff938e43 2471 hw->revision_id = pdev->revision;
92915f71
GR
2472 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2473 hw->subsystem_device_id = pdev->subsystem_device;
2474
2475 hw->mbx.ops.init_params(hw);
56e94095
AD
2476
2477 /* assume legacy case in which PF would only give VF 2 queues */
2478 hw->mac.max_tx_queues = 2;
2479 hw->mac.max_rx_queues = 2;
2480
798e381a
DS
2481 /* lock to protect mailbox accesses */
2482 spin_lock_init(&adapter->mbx_lock);
2483
92915f71
GR
2484 err = hw->mac.ops.reset_hw(hw);
2485 if (err) {
2486 dev_info(&pdev->dev,
e1941a74 2487 "PF still in reset state. Is the PF interface up?\n");
92915f71
GR
2488 } else {
2489 err = hw->mac.ops.init_hw(hw);
2490 if (err) {
dbd9636e 2491			pr_err("init_hw failed: %d\n", err);
92915f71
GR
2492 goto out;
2493 }
798e381a 2494 ixgbevf_negotiate_api(adapter);
e1941a74
GR
2495 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2496 if (err)
2497 dev_info(&pdev->dev, "Error reading MAC address\n");
2498 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2499 dev_info(&pdev->dev,
2500 "MAC address not assigned by administrator.\n");
2501 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2502 }
2503
2504 if (!is_valid_ether_addr(netdev->dev_addr)) {
2505 dev_info(&pdev->dev, "Assigning random MAC address\n");
2506 eth_hw_addr_random(netdev);
2507 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
92915f71
GR
2508 }
2509
2510 /* Enable dynamic interrupt throttling rates */
5f3600eb
AD
2511 adapter->rx_itr_setting = 1;
2512 adapter->tx_itr_setting = 1;
92915f71 2513
92915f71
GR
2514 /* set default ring sizes */
2515 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2516 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2517
92915f71 2518 set_bit(__IXGBEVF_DOWN, &adapter->state);
1a0d6ae5 2519 return 0;
92915f71
GR
2520
2521out:
2522 return err;
2523}
2524
92915f71
GR
2525#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2526 { \
2527 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2528 if (current_counter < last_counter) \
2529 counter += 0x100000000LL; \
2530 last_counter = current_counter; \
2531 counter &= 0xFFFFFFFF00000000LL; \
2532 counter |= current_counter; \
2533 }
2534
2535#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2536 { \
2537 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2538 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2539 u64 current_counter = (current_counter_msb << 32) | \
2540 current_counter_lsb; \
2541 if (current_counter < last_counter) \
2542 counter += 0x1000000000LL; \
2543 last_counter = current_counter; \
2544 counter &= 0xFFFFFFF000000000LL; \
2545 counter |= current_counter; \
2546 }
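
These macros turn the device's free-running 32-bit and 36-bit counters into monotonic 64-bit software counters: a raw reading smaller than the previous one means the hardware wrapped, so one full rollover (2^32 or 2^36) is credited before the live low bits are spliced back in. A runnable sketch of the 32-bit variant:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* carry 2^32 into the accumulated total whenever the raw counter wraps */
    static void update_counter_32(uint32_t current, uint32_t *last,
                                  uint64_t *counter)
    {
        if (current < *last)                    /* hardware counter wrapped */
            *counter += 0x100000000ULL;
        *last = current;
        *counter &= 0xFFFFFFFF00000000ULL;      /* keep accumulated rollovers */
        *counter |= current;                    /* splice in live low bits */
    }

    int main(void)
    {
        uint32_t last = 0;
        uint64_t packets = 0;

        update_counter_32(0xFFFFFFF0u, &last, &packets);
        update_counter_32(0x00000010u, &last, &packets);  /* wrapped */
        printf("packets = 0x%016" PRIx64 "\n", packets);  /* 0x100000010 */
        return 0;
    }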
2547/**
2548 * ixgbevf_update_stats - Update the board statistics counters.
2549 * @adapter: board private structure
2550 **/
2551void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2552{
2553 struct ixgbe_hw *hw = &adapter->hw;
55fb277c 2554 int i;
92915f71 2555
088245a3
GR
2556 if (!adapter->link_up)
2557 return;
2558
92915f71
GR
2559 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2560 adapter->stats.vfgprc);
2561 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2562 adapter->stats.vfgptc);
2563 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2564 adapter->stats.last_vfgorc,
2565 adapter->stats.vfgorc);
2566 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2567 adapter->stats.last_vfgotc,
2568 adapter->stats.vfgotc);
2569 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2570 adapter->stats.vfmprc);
55fb277c
GR
2571
2572 for (i = 0; i < adapter->num_rx_queues; i++) {
2573 adapter->hw_csum_rx_error +=
87e70ab9 2574 adapter->rx_ring[i]->hw_csum_rx_error;
87e70ab9 2575 adapter->rx_ring[i]->hw_csum_rx_error = 0;
55fb277c 2576 }
92915f71
GR
2577}
2578
2579/**
2580 * ixgbevf_watchdog - Timer Call-back
2581 * @data: pointer to adapter cast into an unsigned long
2582 **/
2583static void ixgbevf_watchdog(unsigned long data)
2584{
2585 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2586 struct ixgbe_hw *hw = &adapter->hw;
5f3600eb 2587 u32 eics = 0;
92915f71
GR
2588 int i;
2589
2590 /*
2591 * Do the watchdog outside of interrupt context due to the lovely
2592 * delays that some of the newer hardware requires
2593 */
2594
2595 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2596 goto watchdog_short_circuit;
2597
2598 /* get one bit for every active tx/rx interrupt vector */
2599 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2600 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
6b43c446 2601 if (qv->rx.ring || qv->tx.ring)
5f3600eb 2602 eics |= 1 << i;
92915f71
GR
2603 }
2604
5f3600eb 2605 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
92915f71
GR
2606
2607watchdog_short_circuit:
2608 schedule_work(&adapter->watchdog_task);
2609}
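
The EICS write requests a software-triggered interrupt only for vectors that actually own a ring, so idle vectors are left alone. A trivial sketch of how that mask is assembled:

    #include <stdio.h>

    struct q_vector { int has_rx, has_tx; };

    int main(void)
    {
        struct q_vector qv[4] = { {1, 1}, {0, 1}, {0, 0}, {1, 0} };
        unsigned int eics = 0;

        for (int i = 0; i < 4; i++)
            if (qv[i].has_rx || qv[i].has_tx)
                eics |= 1u << i;

        printf("eics = 0x%x\n", eics);  /* 0xb: vectors 0, 1 and 3 */
        return 0;
    }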
2610
2611/**
2612 * ixgbevf_tx_timeout - Respond to a Tx Hang
2613 * @netdev: network interface device structure
2614 **/
2615static void ixgbevf_tx_timeout(struct net_device *netdev)
2616{
2617 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2618
2619 /* Do the reset outside of interrupt context */
2620 schedule_work(&adapter->reset_task);
2621}
2622
2623static void ixgbevf_reset_task(struct work_struct *work)
2624{
2625 struct ixgbevf_adapter *adapter;
2626 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2627
2628 /* If we're already down or resetting, just bail */
2629 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2e7cfbdd 2630 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
92915f71
GR
2631 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2632 return;
2633
2634 adapter->tx_timeout_count++;
2635
2636 ixgbevf_reinit_locked(adapter);
2637}
2638
2639/**
2640 * ixgbevf_watchdog_task - worker thread to bring link up
2641 * @work: pointer to work_struct containing our data
2642 **/
2643static void ixgbevf_watchdog_task(struct work_struct *work)
2644{
2645 struct ixgbevf_adapter *adapter = container_of(work,
2646 struct ixgbevf_adapter,
2647 watchdog_task);
2648 struct net_device *netdev = adapter->netdev;
2649 struct ixgbe_hw *hw = &adapter->hw;
2650 u32 link_speed = adapter->link_speed;
2651 bool link_up = adapter->link_up;
92fe0bf7 2652 s32 need_reset;
92915f71 2653
26597802
MR
2654 if (IXGBE_REMOVED(hw->hw_addr)) {
2655 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2656 rtnl_lock();
2657 ixgbevf_down(adapter);
2658 rtnl_unlock();
2659 }
2660 return;
2661 }
220fe050
DS
2662 ixgbevf_queue_reset_subtask(adapter);
2663
92915f71
GR
2664 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2665
2666 /*
2667 * Always check the link on the watchdog because we have
2668 * no LSC interrupt
2669 */
92fe0bf7 2670 spin_lock_bh(&adapter->mbx_lock);
1c55ed76 2671
92fe0bf7 2672 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
1c55ed76 2673
92fe0bf7 2674 spin_unlock_bh(&adapter->mbx_lock);
1c55ed76 2675
92fe0bf7
GR
2676 if (need_reset) {
2677 adapter->link_up = link_up;
2678 adapter->link_speed = link_speed;
2679 netif_carrier_off(netdev);
2680 netif_tx_stop_all_queues(netdev);
2681 schedule_work(&adapter->reset_task);
2682 goto pf_has_reset;
92915f71
GR
2683 }
2684 adapter->link_up = link_up;
2685 adapter->link_speed = link_speed;
2686
2687 if (link_up) {
2688 if (!netif_carrier_ok(netdev)) {
b876a744
GR
2689 char *link_speed_string;
2690 switch (link_speed) {
2691 case IXGBE_LINK_SPEED_10GB_FULL:
2692 link_speed_string = "10 Gbps";
2693 break;
2694 case IXGBE_LINK_SPEED_1GB_FULL:
2695 link_speed_string = "1 Gbps";
2696 break;
2697 case IXGBE_LINK_SPEED_100_FULL:
2698 link_speed_string = "100 Mbps";
2699 break;
2700 default:
2701 link_speed_string = "unknown speed";
2702 break;
2703 }
6fe59675 2704 dev_info(&adapter->pdev->dev,
b876a744 2705 "NIC Link is Up, %s\n", link_speed_string);
92915f71
GR
2706 netif_carrier_on(netdev);
2707 netif_tx_wake_all_queues(netdev);
92915f71
GR
2708 }
2709 } else {
2710 adapter->link_up = false;
2711 adapter->link_speed = 0;
2712 if (netif_carrier_ok(netdev)) {
6fe59675 2713 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
92915f71
GR
2714 netif_carrier_off(netdev);
2715 netif_tx_stop_all_queues(netdev);
2716 }
2717 }
2718
92915f71
GR
2719 ixgbevf_update_stats(adapter);
2720
33bd9f60 2721pf_has_reset:
92915f71 2722 /* Reset the timer */
2e7cfbdd
MR
2723 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
2724 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
92915f71
GR
2725 mod_timer(&adapter->watchdog_timer,
2726 round_jiffies(jiffies + (2 * HZ)));
2727
2728 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2729}
2730
2731/**
2732 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
92915f71
GR
2733 * @tx_ring: Tx descriptor ring for a specific queue
2734 *
2735 * Free all transmit software resources
2736 **/
05d063aa 2737void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
92915f71 2738{
05d063aa 2739 ixgbevf_clean_tx_ring(tx_ring);
92915f71
GR
2740
2741 vfree(tx_ring->tx_buffer_info);
2742 tx_ring->tx_buffer_info = NULL;
2743
de02decb
DS
2744 /* if not set, then don't free */
2745 if (!tx_ring->desc)
2746 return;
2747
05d063aa 2748 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2a1f8794 2749 tx_ring->dma);
92915f71
GR
2750
2751 tx_ring->desc = NULL;
2752}
2753
2754/**
2755 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2756 * @adapter: board private structure
2757 *
2758 * Free all transmit software resources
2759 **/
2760static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2761{
2762 int i;
2763
2764 for (i = 0; i < adapter->num_tx_queues; i++)
87e70ab9 2765 if (adapter->tx_ring[i]->desc)
05d063aa 2766 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
92915f71
GR
2767}
2768
2769/**
2770 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
92915f71
GR
2771 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2772 *
2773 * Return 0 on success, negative on failure
2774 **/
05d063aa 2775int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
92915f71 2776{
92915f71
GR
2777 int size;
2778
2779 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
89bf67f1 2780 tx_ring->tx_buffer_info = vzalloc(size);
92915f71
GR
2781 if (!tx_ring->tx_buffer_info)
2782 goto err;
92915f71
GR
2783
2784 /* round up to nearest 4K */
2785 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2786 tx_ring->size = ALIGN(tx_ring->size, 4096);
2787
05d063aa 2788 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2a1f8794 2789 &tx_ring->dma, GFP_KERNEL);
92915f71
GR
2790 if (!tx_ring->desc)
2791 goto err;
2792
92915f71
GR
2793 return 0;
2794
2795err:
2796 vfree(tx_ring->tx_buffer_info);
2797 tx_ring->tx_buffer_info = NULL;
 2798	dev_err(tx_ring->dev,
 2799		"Unable to allocate memory for the Tx descriptor ring\n");
2800 return -ENOMEM;
2801}
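
The descriptor array is rounded up to a 4 KB boundary before the coherent DMA allocation. ALIGN() is the usual power-of-two round-up; a sketch with assumed sizes (16-byte descriptors, matching sizeof(union ixgbe_adv_tx_desc) as I understand it):

    #include <stdio.h>

    /* power-of-two round-up, equivalent to the kernel's ALIGN() */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned int)(a) - 1))

    int main(void)
    {
        unsigned int desc_size = 16;    /* assumed advanced descriptor size */
        unsigned int count = 100;       /* example ring size */
        unsigned int size = count * desc_size;          /* 1600 bytes */

        printf("raw %u -> aligned %u\n", size, ALIGN_UP(size, 4096)); /* 4096 */
        return 0;
    }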
2802
2803/**
2804 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2805 * @adapter: board private structure
2806 *
2807 * If this function returns with an error, then it's possible one or
2808 * more of the rings is populated (while the rest are not). It is the
2809 * callers duty to clean those orphaned rings.
2810 *
2811 * Return 0 on success, negative on failure
2812 **/
2813static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2814{
2815 int i, err = 0;
2816
2817 for (i = 0; i < adapter->num_tx_queues; i++) {
05d063aa 2818 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
92915f71
GR
2819 if (!err)
2820 continue;
2821 hw_dbg(&adapter->hw,
2822 "Allocation for Tx Queue %u failed\n", i);
2823 break;
2824 }
2825
2826 return err;
2827}
2828
2829/**
2830 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
92915f71
GR
2831 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2832 *
2833 * Returns 0 on success, negative on failure
2834 **/
05d063aa 2835int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
92915f71 2836{
92915f71
GR
2837 int size;
2838
2839 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
89bf67f1 2840 rx_ring->rx_buffer_info = vzalloc(size);
e404decb 2841 if (!rx_ring->rx_buffer_info)
05d063aa 2842 goto err;
92915f71
GR
2843
2844 /* Round up to nearest 4K */
2845 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2846 rx_ring->size = ALIGN(rx_ring->size, 4096);
2847
05d063aa 2848 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
2a1f8794 2849 &rx_ring->dma, GFP_KERNEL);
92915f71 2850
05d063aa
ET
2851 if (!rx_ring->desc)
2852 goto err;
92915f71 2853
92915f71 2854 return 0;
05d063aa
ET
2855err:
2856 vfree(rx_ring->rx_buffer_info);
2857 rx_ring->rx_buffer_info = NULL;
2858 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
92915f71
GR
2859 return -ENOMEM;
2860}
2861
2862/**
2863 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2864 * @adapter: board private structure
2865 *
2866 * If this function returns with an error, then it's possible one or
2867 * more of the rings is populated (while the rest are not). It is the
2868 * callers duty to clean those orphaned rings.
2869 *
2870 * Return 0 on success, negative on failure
2871 **/
2872static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2873{
2874 int i, err = 0;
2875
2876 for (i = 0; i < adapter->num_rx_queues; i++) {
05d063aa 2877 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
92915f71
GR
2878 if (!err)
2879 continue;
2880 hw_dbg(&adapter->hw,
2881 "Allocation for Rx Queue %u failed\n", i);
2882 break;
2883 }
2884 return err;
2885}
2886
2887/**
2888 * ixgbevf_free_rx_resources - Free Rx Resources
92915f71
GR
2889 * @rx_ring: ring to clean the resources from
2890 *
2891 * Free all receive software resources
2892 **/
05d063aa 2893void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
92915f71 2894{
05d063aa 2895 ixgbevf_clean_rx_ring(rx_ring);
92915f71
GR
2896
2897 vfree(rx_ring->rx_buffer_info);
2898 rx_ring->rx_buffer_info = NULL;
2899
05d063aa 2900 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
2a1f8794 2901 rx_ring->dma);
92915f71
GR
2902
2903 rx_ring->desc = NULL;
2904}
2905
2906/**
2907 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2908 * @adapter: board private structure
2909 *
2910 * Free all receive software resources
2911 **/
2912static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2913{
2914 int i;
2915
2916 for (i = 0; i < adapter->num_rx_queues; i++)
87e70ab9 2917 if (adapter->rx_ring[i]->desc)
05d063aa 2918 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
92915f71
GR
2919}
2920
2921/**
2922 * ixgbevf_open - Called when a network interface is made active
2923 * @netdev: network interface device structure
2924 *
2925 * Returns 0 on success, negative value on failure
2926 *
2927 * The open entry point is called when a network interface is made
2928 * active by the system (IFF_UP). At this point all resources needed
2929 * for transmit and receive operations are allocated, the interrupt
2930 * handler is registered with the OS, the watchdog timer is started,
2931 * and the stack is notified that the interface is ready.
2932 **/
2933static int ixgbevf_open(struct net_device *netdev)
2934{
2935 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2936 struct ixgbe_hw *hw = &adapter->hw;
2937 int err;
2938
a1f6c6b1 2939 /* A previous failure to open the device because of a lack of
2940 * available MSIX vector resources may have reset the number
2941 * of msix vectors variable to zero. The only way to recover
2942 * is to unload/reload the driver and hope that the system has
2943 * been able to recover some MSIX vector resources.
2944 */
2945 if (!adapter->num_msix_vectors)
2946 return -ENOMEM;
2947
92915f71
GR
2948 /* disallow open during test */
2949 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2950 return -EBUSY;
2951
2952 if (hw->adapter_stopped) {
2953 ixgbevf_reset(adapter);
2954 /* if adapter is still stopped then PF isn't up and
2955 * the vf can't start. */
2956 if (hw->adapter_stopped) {
2957 err = IXGBE_ERR_MBX;
dbd9636e
JK
 2958			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
92915f71
GR
2960 goto err_setup_reset;
2961 }
2962 }
2963
2964 /* allocate transmit descriptors */
2965 err = ixgbevf_setup_all_tx_resources(adapter);
2966 if (err)
2967 goto err_setup_tx;
2968
2969 /* allocate receive descriptors */
2970 err = ixgbevf_setup_all_rx_resources(adapter);
2971 if (err)
2972 goto err_setup_rx;
2973
2974 ixgbevf_configure(adapter);
2975
2976 /*
2977 * Map the Tx/Rx rings to the vectors we were allotted.
2978 * if request_irq will be called in this function map_rings
2979 * must be called *before* up_complete
2980 */
2981 ixgbevf_map_rings_to_vectors(adapter);
2982
795180d8 2983 ixgbevf_up_complete(adapter);
92915f71
GR
2984
2985 /* clear any pending interrupts, may auto mask */
2986 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2987 err = ixgbevf_request_irq(adapter);
2988 if (err)
2989 goto err_req_irq;
2990
5f3600eb 2991 ixgbevf_irq_enable(adapter);
92915f71
GR
2992
2993 return 0;
2994
2995err_req_irq:
2996 ixgbevf_down(adapter);
92915f71
GR
2997err_setup_rx:
2998 ixgbevf_free_all_rx_resources(adapter);
2999err_setup_tx:
3000 ixgbevf_free_all_tx_resources(adapter);
3001 ixgbevf_reset(adapter);
3002
3003err_setup_reset:
3004
3005 return err;
3006}
3007
3008/**
3009 * ixgbevf_close - Disables a network interface
3010 * @netdev: network interface device structure
3011 *
3012 * Returns 0, this is not allowed to fail
3013 *
3014 * The close entry point is called when an interface is de-activated
3015 * by the OS. The hardware is still under the drivers control, but
3016 * needs to be disabled. A global MAC reset is issued to stop the
3017 * hardware, and all transmit and receive resources are freed.
3018 **/
3019static int ixgbevf_close(struct net_device *netdev)
3020{
3021 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3022
3023 ixgbevf_down(adapter);
3024 ixgbevf_free_irq(adapter);
3025
3026 ixgbevf_free_all_tx_resources(adapter);
3027 ixgbevf_free_all_rx_resources(adapter);
3028
3029 return 0;
3030}
3031
220fe050
DS
3032static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3033{
3034 struct net_device *dev = adapter->netdev;
3035
3036 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
3037 return;
3038
3039 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
3040
3041 /* if interface is down do nothing */
3042 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3043 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3044 return;
3045
3046 /* Hardware has to reinitialize queues and interrupts to
3047 * match packet buffer alignment. Unfortunately, the
3048 * hardware is not flexible enough to do this dynamically.
3049 */
3050 if (netif_running(dev))
3051 ixgbevf_close(dev);
3052
3053 ixgbevf_clear_interrupt_scheme(adapter);
3054 ixgbevf_init_interrupt_scheme(adapter);
3055
3056 if (netif_running(dev))
3057 ixgbevf_open(dev);
3058}
3059
70a10e25
AD
3060static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3061 u32 vlan_macip_lens, u32 type_tucmd,
3062 u32 mss_l4len_idx)
92915f71
GR
3063{
3064 struct ixgbe_adv_tx_context_desc *context_desc;
70a10e25 3065 u16 i = tx_ring->next_to_use;
92915f71 3066
70a10e25 3067 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
92915f71 3068
70a10e25
AD
3069 i++;
3070 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
92915f71 3071
70a10e25
AD
3072 /* set bits to identify this as an advanced context descriptor */
3073 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
92915f71 3074
70a10e25
AD
3075 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3076 context_desc->seqnum_seed = 0;
3077 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3078 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3079}
3080
3081static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
7ad1a093
ET
3082 struct ixgbevf_tx_buffer *first,
3083 u8 *hdr_len)
70a10e25 3084{
7ad1a093 3085 struct sk_buff *skb = first->skb;
70a10e25
AD
3086 u32 vlan_macip_lens, type_tucmd;
3087 u32 mss_l4len_idx, l4len;
8f12c034 3088 int err;
70a10e25 3089
01a545cf
ET
3090 if (skb->ip_summed != CHECKSUM_PARTIAL)
3091 return 0;
3092
70a10e25
AD
3093 if (!skb_is_gso(skb))
3094 return 0;
92915f71 3095
8f12c034
FR
3096 err = skb_cow_head(skb, 0);
3097 if (err < 0)
3098 return err;
92915f71 3099
70a10e25
AD
3100 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3101 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3102
3103 if (skb->protocol == htons(ETH_P_IP)) {
3104 struct iphdr *iph = ip_hdr(skb);
3105 iph->tot_len = 0;
3106 iph->check = 0;
3107 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3108 iph->daddr, 0,
3109 IPPROTO_TCP,
3110 0);
3111 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
7ad1a093
ET
3112 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3113 IXGBE_TX_FLAGS_CSUM |
3114 IXGBE_TX_FLAGS_IPV4;
70a10e25
AD
3115 } else if (skb_is_gso_v6(skb)) {
3116 ipv6_hdr(skb)->payload_len = 0;
3117 tcp_hdr(skb)->check =
3118 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3119 &ipv6_hdr(skb)->daddr,
3120 0, IPPROTO_TCP, 0);
7ad1a093
ET
3121 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3122 IXGBE_TX_FLAGS_CSUM;
70a10e25
AD
3123 }
3124
3125 /* compute header lengths */
3126 l4len = tcp_hdrlen(skb);
 3127	*hdr_len = skb_transport_offset(skb) + l4len;
3129
7ad1a093
ET
3130 /* update gso size and bytecount with header size */
3131 first->gso_segs = skb_shinfo(skb)->gso_segs;
3132 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3133
70a10e25
AD
 3134	/* mss_l4len_idx: use 1 as index for TSO */
3135 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
3136 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3137 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
3138
3139 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3140 vlan_macip_lens = skb_network_header_len(skb);
3141 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7ad1a093 3142 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
70a10e25
AD
3143
3144 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3145 type_tucmd, mss_l4len_idx);
3146
3147 return 1;
92915f71
GR
3148}
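
The 32-bit mss_l4len_idx word packs the L4 header length, the MSS and the context index into adjacent bit fields. A sketch of that packing; the shift values are assumptions based on the ixgbe advanced-descriptor layout, not taken from this file.

    #include <stdint.h>
    #include <stdio.h>

    /* assumed shifts for the names used by the driver */
    #define L4LEN_SHIFT 8   /* IXGBE_ADVTXD_L4LEN_SHIFT */
    #define MSS_SHIFT   16  /* IXGBE_ADVTXD_MSS_SHIFT */
    #define IDX_SHIFT   4   /* IXGBE_ADVTXD_IDX_SHIFT */

    int main(void)
    {
        uint32_t l4len = 20;    /* TCP header without options */
        uint32_t mss = 1448;    /* 1500 MTU - 40 IP/TCP - 12 TCP timestamps */
        uint32_t mss_l4len_idx = (l4len << L4LEN_SHIFT) |
                                 (mss << MSS_SHIFT) |
                                 (1 << IDX_SHIFT);  /* context index 1 = TSO */

        printf("mss_l4len_idx = 0x%08x\n", mss_l4len_idx);
        return 0;
    }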
3149
7ad1a093
ET
3150static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3151 struct ixgbevf_tx_buffer *first)
92915f71 3152{
7ad1a093 3153 struct sk_buff *skb = first->skb;
70a10e25
AD
3154 u32 vlan_macip_lens = 0;
3155 u32 mss_l4len_idx = 0;
3156 u32 type_tucmd = 0;
92915f71 3157
70a10e25
AD
3158 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3159 u8 l4_hdr = 0;
3160 switch (skb->protocol) {
0933ce4a 3161 case htons(ETH_P_IP):
70a10e25
AD
3162 vlan_macip_lens |= skb_network_header_len(skb);
3163 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3164 l4_hdr = ip_hdr(skb)->protocol;
3165 break;
0933ce4a 3166 case htons(ETH_P_IPV6):
70a10e25
AD
3167 vlan_macip_lens |= skb_network_header_len(skb);
3168 l4_hdr = ipv6_hdr(skb)->nexthdr;
3169 break;
3170 default:
3171 if (unlikely(net_ratelimit())) {
3172 dev_warn(tx_ring->dev,
3173 "partial checksum but proto=%x!\n",
7ad1a093 3174 first->protocol);
70a10e25
AD
3175 }
3176 break;
3177 }
92915f71 3178
70a10e25
AD
3179 switch (l4_hdr) {
3180 case IPPROTO_TCP:
3181 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3182 mss_l4len_idx = tcp_hdrlen(skb) <<
3183 IXGBE_ADVTXD_L4LEN_SHIFT;
3184 break;
3185 case IPPROTO_SCTP:
3186 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3187 mss_l4len_idx = sizeof(struct sctphdr) <<
3188 IXGBE_ADVTXD_L4LEN_SHIFT;
3189 break;
3190 case IPPROTO_UDP:
3191 mss_l4len_idx = sizeof(struct udphdr) <<
3192 IXGBE_ADVTXD_L4LEN_SHIFT;
3193 break;
3194 default:
3195 if (unlikely(net_ratelimit())) {
3196 dev_warn(tx_ring->dev,
3197 "partial checksum but l4 proto=%x!\n",
3198 l4_hdr);
3199 }
3200 break;
3201 }
7ad1a093
ET
3202
3203 /* update TX checksum flag */
3204 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
92915f71
GR
3205 }
3206
70a10e25
AD
3207 /* vlan_macip_lens: MACLEN, VLAN tag */
3208 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
7ad1a093 3209 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
70a10e25
AD
3210
3211 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3212 type_tucmd, mss_l4len_idx);
92915f71
GR
3213}
3214
29d37fa1 3215static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
92915f71 3216{
29d37fa1
ET
3217 /* set type for advanced descriptor with frame checksum insertion */
3218 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3219 IXGBE_ADVTXD_DCMD_IFCS |
3220 IXGBE_ADVTXD_DCMD_DEXT);
92915f71 3221
29d37fa1
ET
3222 /* set HW vlan bit if vlan is present */
3223 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3224 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
92915f71 3225
29d37fa1
ET
3226 /* set segmentation enable bits for TSO/FSO */
3227 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3228 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
92915f71 3229
29d37fa1
ET
3230 return cmd_type;
3231}
92915f71 3232
29d37fa1
ET
3233static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3234 u32 tx_flags, unsigned int paylen)
3235{
3236 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
9bdfefd2 3237
29d37fa1
ET
3238 /* enable L4 checksum for TSO and TX checksum offload */
3239 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3240 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
92915f71 3241
29d37fa1
ET
 3242	/* enable IPv4 checksum for TSO */
3243 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3244 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
92915f71 3245
29d37fa1
ET
3246 /* use index 1 context for TSO/FSO/FCOE */
3247 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3248 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
92915f71 3249
29d37fa1
ET
3250 /* Check Context must be set if Tx switch is enabled, which it
3251 * always is for case where virtual functions are running
3252 */
3253 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
92915f71 3254
29d37fa1
ET
3255 tx_desc->read.olinfo_status = olinfo_status;
3256}
92915f71 3257
29d37fa1
ET
3258static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3259 struct ixgbevf_tx_buffer *first,
3260 const u8 hdr_len)
3261{
3262 dma_addr_t dma;
3263 struct sk_buff *skb = first->skb;
3264 struct ixgbevf_tx_buffer *tx_buffer;
3265 union ixgbe_adv_tx_desc *tx_desc;
3266 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
3267 unsigned int data_len = skb->data_len;
3268 unsigned int size = skb_headlen(skb);
3269 unsigned int paylen = skb->len - hdr_len;
3270 u32 tx_flags = first->tx_flags;
3271 __le32 cmd_type;
3272 u16 i = tx_ring->next_to_use;
9bdfefd2 3273
29d37fa1 3274 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
92915f71 3275
29d37fa1
ET
3276 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
3277 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
7ad1a093 3278
29d37fa1
ET
3279 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3280 if (dma_mapping_error(tx_ring->dev, dma))
3281 goto dma_error;
92915f71 3282
29d37fa1
ET
3283 /* record length, and DMA address */
3284 dma_unmap_len_set(first, len, size);
3285 dma_unmap_addr_set(first, dma, dma);
92915f71 3286
29d37fa1 3287 tx_desc->read.buffer_addr = cpu_to_le64(dma);
92915f71 3288
29d37fa1
ET
3289 for (;;) {
3290 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3291 tx_desc->read.cmd_type_len =
3292 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
92915f71 3293
29d37fa1
ET
3294 i++;
3295 tx_desc++;
3296 if (i == tx_ring->count) {
3297 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3298 i = 0;
3299 }
92915f71 3300
29d37fa1
ET
3301 dma += IXGBE_MAX_DATA_PER_TXD;
3302 size -= IXGBE_MAX_DATA_PER_TXD;
92915f71 3303
29d37fa1
ET
3304 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3305 tx_desc->read.olinfo_status = 0;
3306 }
92915f71 3307
29d37fa1
ET
3308 if (likely(!data_len))
3309 break;
92915f71 3310
29d37fa1 3311 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
92915f71 3312
29d37fa1
ET
3313 i++;
3314 tx_desc++;
3315 if (i == tx_ring->count) {
3316 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3317 i = 0;
3318 }
92915f71 3319
29d37fa1
ET
3320 size = skb_frag_size(frag);
3321 data_len -= size;
92915f71 3322
29d37fa1
ET
3323 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3324 DMA_TO_DEVICE);
3325 if (dma_mapping_error(tx_ring->dev, dma))
3326 goto dma_error;
70a10e25 3327
29d37fa1
ET
3328 tx_buffer = &tx_ring->tx_buffer_info[i];
3329 dma_unmap_len_set(tx_buffer, len, size);
3330 dma_unmap_addr_set(tx_buffer, dma, dma);
92915f71 3331
29d37fa1
ET
3332 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3333 tx_desc->read.olinfo_status = 0;
3334
3335 frag++;
70a10e25 3336 }
92915f71 3337
29d37fa1
ET
3338 /* write last descriptor with RS and EOP bits */
3339 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3340 tx_desc->read.cmd_type_len = cmd_type;
3341
3342 /* set the timestamp */
3343 first->time_stamp = jiffies;
3344
3345 /* Force memory writes to complete before letting h/w know there
3346 * are new descriptors to fetch. (Only applicable for weak-ordered
3347 * memory model archs, such as IA-64).
3348 *
3349 * We also need this memory barrier (wmb) to make certain all of the
3350 * status bits have been updated before next_to_watch is written.
70a10e25 3351 */
29d37fa1 3352 wmb();
92915f71 3353
29d37fa1
ET
3354 /* set next_to_watch value indicating a packet is present */
3355 first->next_to_watch = tx_desc;
92915f71 3356
29d37fa1
ET
3357 i++;
3358 if (i == tx_ring->count)
3359 i = 0;
9bdfefd2 3360
29d37fa1 3361 tx_ring->next_to_use = i;
92915f71 3362
29d37fa1 3363 /* notify HW of packet */
06380db6 3364 ixgbevf_write_tail(tx_ring, i);
29d37fa1
ET
3365
3366 return;
3367dma_error:
3368 dev_err(tx_ring->dev, "TX DMA map failed\n");
3369
3370 /* clear dma mappings for failed tx_buffer_info map */
3371 for (;;) {
3372 tx_buffer = &tx_ring->tx_buffer_info[i];
3373 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
3374 if (tx_buffer == first)
3375 break;
3376 if (i == 0)
3377 i = tx_ring->count;
3378 i--;
3379 }
92915f71 3380
92915f71 3381 tx_ring->next_to_use = i;
92915f71
GR
3382}
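
Any buffer larger than IXGBE_MAX_DATA_PER_TXD is split across several descriptors, advancing the DMA address chunk by chunk, which is what the inner while loop above does. A sketch of the chunking; the per-descriptor cap is an assumed value.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_DATA_PER_TXD (1u << 14) /* assumed: 16 KB per descriptor */

    int main(void)
    {
        uint64_t dma = 0x10000000ULL;   /* example mapped address */
        unsigned int size = 40000;      /* e.g. a large linear TSO chunk */
        int desc = 0;

        while (size > MAX_DATA_PER_TXD) {
            printf("desc %d: addr 0x%llx len %u\n", desc++,
                   (unsigned long long)dma, MAX_DATA_PER_TXD);
            dma += MAX_DATA_PER_TXD;
            size -= MAX_DATA_PER_TXD;
        }
        printf("desc %d: addr 0x%llx len %u\n", desc,
               (unsigned long long)dma, size);
        return 0;
    }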
3383
fb40195c 3384static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
92915f71 3385{
fb40195c 3386 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
92915f71
GR
3387 /* Herbert's original patch had:
3388 * smp_mb__after_netif_stop_queue();
3389 * but since that doesn't exist yet, just open code it. */
3390 smp_mb();
3391
3392 /* We need to check again in a case another CPU has just
3393 * made room available. */
f880d07b 3394 if (likely(ixgbevf_desc_unused(tx_ring) < size))
92915f71
GR
3395 return -EBUSY;
3396
3397 /* A reprieve! - use start_queue because it doesn't call schedule */
fb40195c 3398 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
095e2617
ET
3399 ++tx_ring->tx_stats.restart_queue;
3400
92915f71
GR
3401 return 0;
3402}
3403
fb40195c 3404static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
92915f71 3405{
f880d07b 3406 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
92915f71 3407 return 0;
fb40195c 3408 return __ixgbevf_maybe_stop_tx(tx_ring, size);
92915f71
GR
3409}
3410
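/* Descriptive note (the wake side is assumed to live in the Tx clean
 * path, e.g. ixgbevf_clean_tx_irq()): the helpers above implement the
 * classic stop/wake handshake:
 *
 *	xmit path			clean path
 *	---------			----------
 *	stop subqueue			free descriptors
 *	smp_mb()			memory barrier pairing
 *	re-check unused descriptors	if stopped and room available:
 *	if room: restart subqueue		wake subqueue
 *
 * Without the barrier and the re-check, the queue could be stopped just
 * as the cleaner frees the last in-flight descriptors and stall forever.
 */
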
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time (see the worked example after this
	 * function)
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

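/* Worked example for the descriptor budget above (illustrative numbers,
 * assuming 4 KB pages so that PAGE_SIZE <= IXGBE_MAX_DATA_PER_TXD): a
 * TSO skb with a 254-byte linear header and 16 page fragments needs
 *
 *	count = TXD_USE_COUNT(254) + 16 = 1 + 16 = 17
 *
 * and ixgbevf_maybe_stop_tx() is asked for count + 3 = 20 descriptors:
 * the extra three cover one context descriptor plus the two-descriptor
 * gap that keeps the tail pointer from touching the head.
 */
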
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);

	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	return 0;
}

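/* Minimal sketch (hypothetical helper, not in the driver) of the same
 * bounds check, assuming the ETH_* constants from if_ether.h and the
 * frame-size caps from the ixgbe headers:
 */
static inline bool ixgbevf_mtu_ok(int new_mtu, int max_possible_frame)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* e.g. new_mtu 1500 -> max_frame 1518, under the 1522-byte
	 * MAXIMUM_ETHERNET_VLAN_SIZE cap (1518 plus a 4-byte VLAN tag)
	 */
	return new_mtu >= 68 && max_frame <= max_possible_frame;
}
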
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbevf_netpoll(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return;
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

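/* Note (assumption about the handler's behavior): the loop above invokes
 * the MSI-X vector handler directly with a dummy irq of 0; the handler
 * is expected to simply schedule NAPI for each queue vector, so netpoll
 * can make progress even when interrupt delivery is unavailable.
 */
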
0ac1e8ce 3570static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
92915f71
GR
3571{
3572 struct net_device *netdev = pci_get_drvdata(pdev);
3573 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
0ac1e8ce
AD
3574#ifdef CONFIG_PM
3575 int retval = 0;
3576#endif
92915f71
GR
3577
3578 netif_device_detach(netdev);
3579
3580 if (netif_running(netdev)) {
0ac1e8ce 3581 rtnl_lock();
92915f71
GR
3582 ixgbevf_down(adapter);
3583 ixgbevf_free_irq(adapter);
3584 ixgbevf_free_all_tx_resources(adapter);
3585 ixgbevf_free_all_rx_resources(adapter);
0ac1e8ce 3586 rtnl_unlock();
92915f71
GR
3587 }
3588
0ac1e8ce 3589 ixgbevf_clear_interrupt_scheme(adapter);
92915f71 3590
0ac1e8ce
AD
3591#ifdef CONFIG_PM
3592 retval = pci_save_state(pdev);
3593 if (retval)
3594 return retval;
92915f71 3595
0ac1e8ce 3596#endif
bc0c7151
MR
3597 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3598 pci_disable_device(pdev);
0ac1e8ce
AD
3599
3600 return 0;
3601}
3602
3603#ifdef CONFIG_PM
3604static int ixgbevf_resume(struct pci_dev *pdev)
3605{
27ae2967
WY
3606 struct net_device *netdev = pci_get_drvdata(pdev);
3607 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
0ac1e8ce
AD
3608 u32 err;
3609
0ac1e8ce
AD
3610 pci_restore_state(pdev);
3611 /*
3612 * pci_restore_state clears dev->state_saved so call
3613 * pci_save_state to restore it.
3614 */
3615 pci_save_state(pdev);
3616
3617 err = pci_enable_device_mem(pdev);
3618 if (err) {
3619 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3620 return err;
3621 }
4e857c58 3622 smp_mb__before_atomic();
bc0c7151 3623 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
0ac1e8ce
AD
3624 pci_set_master(pdev);
3625
798e381a
DS
3626 ixgbevf_reset(adapter);
3627
0ac1e8ce
AD
3628 rtnl_lock();
3629 err = ixgbevf_init_interrupt_scheme(adapter);
3630 rtnl_unlock();
3631 if (err) {
3632 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3633 return err;
3634 }
3635
0ac1e8ce
AD
3636 if (netif_running(netdev)) {
3637 err = ixgbevf_open(netdev);
3638 if (err)
3639 return err;
3640 }
3641
3642 netif_device_attach(netdev);
3643
3644 return err;
3645}
3646
3647#endif /* CONFIG_PM */
3648static void ixgbevf_shutdown(struct pci_dev *pdev)
3649{
3650 ixgbevf_suspend(pdev, PMSG_SUSPEND);
92915f71
GR
3651}
3652
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}

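/* The fetch_begin/fetch_retry loops above are the standard u64_stats
 * seqcount pattern: on 32-bit kernels a 64-bit counter cannot be read
 * atomically, so the reader snapshots the values and retries if the
 * writer bumped the sequence count in between.  On 64-bit kernels the
 * begin/retry helpers compile away to nothing.
 */
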
0ac1e8ce 3691static const struct net_device_ops ixgbevf_netdev_ops = {
c12db769
SH
3692 .ndo_open = ixgbevf_open,
3693 .ndo_stop = ixgbevf_close,
3694 .ndo_start_xmit = ixgbevf_xmit_frame,
3695 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4197aa7b 3696 .ndo_get_stats64 = ixgbevf_get_stats,
92915f71 3697 .ndo_validate_addr = eth_validate_addr,
c12db769
SH
3698 .ndo_set_mac_address = ixgbevf_set_mac,
3699 .ndo_change_mtu = ixgbevf_change_mtu,
3700 .ndo_tx_timeout = ixgbevf_tx_timeout,
c12db769
SH
3701 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3702 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
c777cdfa
JK
3703#ifdef CONFIG_NET_RX_BUSY_POLL
3704 .ndo_busy_poll = ixgbevf_busy_poll_recv,
3705#endif
688ff32d
ET
3706#ifdef CONFIG_NET_POLL_CONTROLLER
3707 .ndo_poll_controller = ixgbevf_netpoll,
3708#endif
92915f71 3709};
92915f71
GR
3710
3711static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3712{
0ac1e8ce 3713 dev->netdev_ops = &ixgbevf_netdev_ops;
92915f71
GR
3714 ixgbevf_set_ethtool_ops(dev);
3715 dev->watchdog_timeo = 5 * HZ;
3716}
3717
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
	set_bit(__IXGBEVF_WORK_INIT, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	/* adapter is still NULL if we failed before alloc_etherdev_mq() */
	if (!adapter || !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	return err;
}

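/* Note on the unwind order above: each err_* label releases only what
 * was acquired before the failing step, in reverse order of setup, so a
 * failure at any point frees exactly the held resources.  adapter stays
 * NULL until alloc_etherdev_mq() succeeds, hence the guard on the
 * err_pci_reg/err_dma path.
 */
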
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool disable_dev;

	set_bit(__IXGBEVF_REMOVING, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	/* sample the DISABLED bit before free_netdev() frees the adapter */
	disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);

	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

3960
3961/**
3962 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3963 * @pdev: Pointer to PCI device
3964 *
3965 * Restart the card from scratch, as if from a cold-boot. Implementation
3966 * resembles the first-half of the ixgbevf_resume routine.
3967 */
3968static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3969{
3970 struct net_device *netdev = pci_get_drvdata(pdev);
3971 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3972
3973 if (pci_enable_device_mem(pdev)) {
3974 dev_err(&pdev->dev,
3975 "Cannot re-enable PCI device after reset.\n");
3976 return PCI_ERS_RESULT_DISCONNECT;
3977 }
3978
4e857c58 3979 smp_mb__before_atomic();
bc0c7151 3980 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
9f19f31d
AD
3981 pci_set_master(pdev);
3982
3983 ixgbevf_reset(adapter);
3984
3985 return PCI_ERS_RESULT_RECOVERED;
3986}
3987
3988/**
3989 * ixgbevf_io_resume - called when traffic can start flowing again.
3990 * @pdev: Pointer to PCI device
3991 *
3992 * This callback is called when the error recovery driver tells us that
3993 * its OK to resume normal operation. Implementation resembles the
3994 * second-half of the ixgbevf_resume routine.
3995 */
3996static void ixgbevf_io_resume(struct pci_dev *pdev)
3997{
3998 struct net_device *netdev = pci_get_drvdata(pdev);
3999 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4000
4001 if (netif_running(netdev))
4002 ixgbevf_up(adapter);
4003
4004 netif_device_attach(netdev);
4005}
4006
4007/* PCI Error Recovery (ERS) */
3646f0e5 4008static const struct pci_error_handlers ixgbevf_err_handler = {
9f19f31d
AD
4009 .error_detected = ixgbevf_io_error_detected,
4010 .slot_reset = ixgbevf_io_slot_reset,
4011 .resume = ixgbevf_io_resume,
4012};
4013
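/* Recovery flow implemented by the handlers above: on an uncorrectable
 * PCI error the AER core calls .error_detected (detach and, unless the
 * failure is permanent, disable the device), then resets the slot and
 * calls .slot_reset (re-enable and reset the VF), and finally .resume
 * (bring the interface back up and reattach it).
 */
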
static struct pci_driver ixgbevf_driver = {
	.name = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe = ixgbevf_probe,
	.remove = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = ixgbevf_suspend,
	.resume = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */