/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#define MAJ 3
#define MIN 3
#define BUILD 8
#define KFIX 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k" __stringify(KFIX)
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2011 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
	[board_X540] = &ixgbe_X540_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
	 board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 gcr;
	u32 gpie;
	u32 vmdctl;

#ifdef CONFIG_PCI_IOV
	/* disable iov and allow time for transactions to clear */
	pci_disable_sriov(adapter->pdev);
#endif

	/* turn off device IOV mode */
	gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr &= ~(IXGBE_GCR_EXT_SRIOV);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* set default pool back to 0 */
	vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);

	/* take a breather then clean up driver data */
	msleep(100);

	kfree(adapter->vfinfo);
	adapter->vfinfo = NULL;

	adapter->num_vfs = 0;
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};


/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer_info;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer_info =
			&tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)tx_buffer_info->dma,
			tx_buffer_info->length,
			tx_buffer_info->next_to_watch,
			(u64)tx_buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] "
			"[PlPOIdStDDt Ln] [bi->dma       ] "
			"leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %3X %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)tx_buffer_info->dma,
				tx_buffer_info->length,
				tx_buffer_info->next_to_watch,
				(u64)tx_buffer_info->time_stamp,
				tx_buffer_info->skb);
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				pr_cont(" NTC/U\n");
			else if (i == tx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == tx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

			if (netif_msg_pktdata(adapter) &&
			    tx_buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS, 16, 1,
					       phys_to_virt(tx_buffer_info->dma),
					       tx_buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum  Ident |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   phys_to_virt(rx_buffer_info->dma),
					   rx_ring->rx_buf_len, true);

					if (rx_ring->rx_buf_len
						< IXGBE_RXBUFFER_2048)
						print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS, 16, 1,
						  phys_to_virt(
						    rx_buffer_info->page_dma +
						    rx_buffer_info->page_offset
						  ),
						  PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
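		/* the 82598 IVAR table holds 64 Rx entries followed by 64 Tx
		 * entries, four 8-bit entries per 32-bit register: pick the
		 * register first, then the byte lane within it
		 */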
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
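		/* these MACs support up to 64 queues, so the 64-bit cause
		 * mask is split across the two 32-bit EICS_EX registers
		 */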
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 data = 0;
	u32 xoff[8] = {0};
	int i;

	if ((hw->fc.current_mode == ixgbe_fc_full) ||
	    (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
			break;
		default:
			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		}
		hwstats->lxoffrxc += data;

		/* refill credits (no tx hang) if we received xoff */
		if (!data)
			return;

		for (i = 0; i < adapter->num_tx_queues; i++)
			clear_bit(__IXGBE_HANG_CHECK_ARMED,
				  &adapter->tx_ring[i]->state);
		return;
	} else if (!(adapter->dcb_cfg.pfc_mode_enable))
		return;

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += xoff[i];
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		u8 tc = tx_ring->dcb_tc;

		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->tx_stats.completed;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

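	/* tail can wrap back to the start of the ring while head is still
	 * chasing it, so add ring->count back in for that case
	 */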
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
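/*
 * A single descriptor carries at most 2^IXGBE_MAX_TXD_PWR (16K) bytes, so a
 * buffer of S bytes needs (S >> IXGBE_MAX_TXD_PWR) descriptors plus one for
 * any remainder; DESC_NEEDED sizes the worst case for one frame: skb->data,
 * every possible page fragment, and one context descriptor.
 */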
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		ixgbe_service_event_schedule(adapter);
	}
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 i, eop, count = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];

			tx_desc->wb.status = 0;
			cleaned = (i == eop);

			i++;
			if (i == tx_ring->count)
				i = 0;

			if (cleaned && tx_buffer_info->skb) {
				total_bytes += tx_buffer_info->bytecount;
				total_packets += tx_buffer_info->gso_segs;
			}

			ixgbe_unmap_and_free_tx_resource(tx_ring,
							 tx_buffer_info);
		}

		tx_ring->tx_stats.completed++;
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&tx_ring->syncp);

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
		e_err(drv, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		       adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
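	/* only restart the queue once enough descriptors have been freed to
	 * hold two more worst-case frames
	 */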
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return count < tx_ring->work_limit;
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl;
	u8 reg_idx = rx_ring->reg_idx;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		break;
	default:
		break;
	}
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl;
	u8 reg_idx = tx_ring->reg_idx;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
		break;
	default:
		break;
	}
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	int cpu = get_cpu();
	long r_idx;
	int i;

	if (q_vector->cpu == cpu)
		goto out_no_update;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int num_q_vectors;
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_q_vectors = 1;

	for (i = 0; i < num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}
#endif /* CONFIG_IXGBE_DCA */

static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}

/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
			      struct sk_buff *skb, u8 status,
			      struct ixgbe_ring *ring,
			      union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && (tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(napi, skb);
	else
		netif_rx(skb);
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor carrying the hardware checksum status
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     union ixgbe_adv_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	struct sk_buff *skb;
	u16 i = rx_ring->next_to_use;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev)
		return;

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];
		skb = bi->skb;

		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				goto no_buffers;
			}
			/* initialize queue mapping */
			skb_record_rx_queue(skb, rx_ring->queue_index);
			bi->skb = skb;
		}

		if (!bi->dma) {
			bi->dma = dma_map_single(rx_ring->dev,
						 skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				rx_ring->rx_stats.alloc_rx_buff_failed++;
				bi->dma = 0;
				goto no_buffers;
			}
		}

		if (ring_is_ps_enabled(rx_ring)) {
			if (!bi->page) {
				bi->page = netdev_alloc_page(rx_ring->netdev);
				if (!bi->page) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					goto no_buffers;
				}
			}

			if (!bi->page_dma) {
				/* use a half page if we're re-using */
				bi->page_offset ^= PAGE_SIZE / 2;
				bi->page_dma = dma_map_page(rx_ring->dev,
							    bi->page,
							    bi->page_offset,
							    PAGE_SIZE / 2,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(rx_ring->dev,
						      bi->page_dma)) {
					rx_ring->rx_stats.alloc_rx_page_failed++;
					bi->page_dma = 0;
					goto no_buffers;
				}
			}

			/* Refresh the desc even if buffer_addrs didn't change
			 * because each write-back erases this info. */
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		ixgbe_release_rx_desc(rx_ring, i);
	}
}


static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger.  In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
	u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
		    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > IXGBE_RX_HDR_SIZE)
		hlen = IXGBE_RX_HDR_SIZE;
	return hlen;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
	unsigned int frag_list_size = 0;
	unsigned int skb_cnt = 1;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
		skb_cnt++;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;

	return skb;
}

static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
{
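	/* a non-zero RSC count in the descriptor means hardware coalesced
	 * this buffer with others into one large receive
	 */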
	return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
		IXGBE_RXDADV_RSCCNT_MASK);
}

static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	const int current_node = numa_node_id();
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
	u32 staterr;
	u16 i;
	u16 cleaned_count = 0;
	bool pkt_is_rsc = false;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;

		rmb(); /* read descriptor and rx_buffer_info after status DD */

		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		skb = rx_buffer_info->skb;
		rx_buffer_info->skb = NULL;
		prefetch(skb->data);

		if (ring_is_rsc_enabled(rx_ring))
			pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);

		/* if this is a skb from previous receive DMA will be 0 */
		if (rx_buffer_info->dma) {
			u16 hlen;
			if (pkt_is_rsc &&
			    !(staterr & IXGBE_RXD_STAT_EOP) &&
			    !skb->prev) {
				/*
				 * When HWRSC is enabled, delay unmapping
				 * of the first packet. It carries the
				 * header information, HW may still
				 * access the header after the writeback.
				 * Only unmap it when EOP is reached
				 */
				IXGBE_RSC_CB(skb)->delay_unmap = true;
				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
			} else {
				dma_unmap_single(rx_ring->dev,
						 rx_buffer_info->dma,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			}
			rx_buffer_info->dma = 0;

			if (ring_is_ps_enabled(rx_ring)) {
				hlen = ixgbe_get_hlen(rx_desc);
				upper_len = le16_to_cpu(rx_desc->wb.upper.length);
			} else {
				hlen = le16_to_cpu(rx_desc->wb.upper.length);
			}

			skb_put(skb, hlen);
		} else {
			/* assume packet split since header is unmapped */
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		if (upper_len) {
			dma_unmap_page(rx_ring->dev,
				       rx_buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

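			/* recycle the page only if we hold the sole reference
			 * to it and it sits on the local NUMA node
			 */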
			if ((page_count(rx_buffer_info->page) == 1) &&
			    (page_to_nid(rx_buffer_info->page) == current_node))
				get_page(rx_buffer_info->page);
			else
				rx_buffer_info->page = NULL;

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (pkt_is_rsc) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
				     IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			if (ring_is_ps_enabled(rx_ring)) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		if (skb->prev) {
			skb = ixgbe_transform_rsc_queue(skb);
			/* if we got here without RSC the packet is invalid */
			if (!pkt_is_rsc) {
				__pskb_trim(skb, 0);
				rx_buffer_info->skb = skb;
				goto next_desc;
			}
		}

		if (ring_is_rsc_enabled(rx_ring)) {
			if (IXGBE_RSC_CB(skb)->delay_unmap) {
				dma_unmap_single(rx_ring->dev,
						 IXGBE_RSC_CB(skb)->dma,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
				IXGBE_RSC_CB(skb)->dma = 0;
				IXGBE_RSC_CB(skb)->delay_unmap = false;
			}
		}
		if (pkt_is_rsc) {
			if (ring_is_ps_enabled(rx_ring))
				rx_ring->rx_stats.rsc_count +=
					skb_shinfo(skb)->nr_frags;
			else
				rx_ring->rx_stats.rsc_count +=
					IXGBE_RSC_CB(skb)->skb_cnt;
			rx_ring->rx_stats.rsc_flush++;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			/* trim packet back to size 0 and recycle it */
			__pskb_trim(skb, 0);
			rx_buffer_info->skb = skb;
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, rx_desc, skb);
		if (adapter->netdev->features & NETIF_F_RXHASH)
			ixgbe_rx_hash(rx_desc, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes)
				goto next_desc;
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		(*work_done)++;
		if (*work_done >= work_to_do)
			break;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
			sizeof(struct fc_frame_header) -
			sizeof(struct fcoe_crc_eof);
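		/* round the estimated per-frame payload down to a 512-byte
		 * boundary; a smaller mss makes the derived packet count err
		 * high rather than low
		 */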
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
			ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
			ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		if (q_vector->txr_count && !q_vector->rxr_count)
			/* tx only */
			q_vector->eitr = adapter->tx_eitr_param;
		else if (q_vector->rxr_count)
			/* rx or mixed */
			q_vector->eitr = adapter->rx_eitr_param;

		ixgbe_write_eitr(q_vector);
		/* If ATR is enabled, set interrupt affinity */
		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
			/*
			 * Allocate the affinity_hint cpumask, assign the mask
			 * for this vector, and set our affinity_hint for
			 * this irq.
			 */
			if (!alloc_cpumask_var(&q_vector->affinity_mask,
					       GFP_KERNEL))
				return;
			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
			irq_set_affinity_hint(adapter->msix_entries[v_idx].vector,
					      q_vector->affinity_mask);
		}
	}

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
			       v_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
		break;

	default:
		break;
	}
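	/* after the loop v_idx indexes the "other causes" vector; give it a
	 * fixed interrupt throttle rate
	 */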
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	if (adapter->num_vfs)
		mask &= ~(IXGBE_EIMS_OTHER |
			  IXGBE_EIMS_MAILBOX |
			  IXGBE_EIMS_LSC);
	else
		mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
			   u32 eitr, u8 itr_setting,
			   int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;


	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

509ee935
JB
1667/**
1668 * ixgbe_write_eitr - write EITR register in hardware specific way
fe49f04a 1669 * @q_vector: structure containing interrupt and ring information
509ee935
JB
1670 *
1671 * This function is made to be called by ethtool and by the driver
1672 * when it needs to update EITR registers at runtime. Hardware
1673 * specific quirks/differences are taken care of here.
1674 */
fe49f04a 1675void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
509ee935 1676{
fe49f04a 1677 struct ixgbe_adapter *adapter = q_vector->adapter;
509ee935 1678 struct ixgbe_hw *hw = &adapter->hw;
fe49f04a
AD
1679 int v_idx = q_vector->v_idx;
1680 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1681
bd508178
AD
1682 switch (adapter->hw.mac.type) {
1683 case ixgbe_mac_82598EB:
509ee935
JB
1684 /* must write high and low 16 bits to reset counter */
1685 itr_reg |= (itr_reg << 16);
bd508178
AD
1686 break;
1687 case ixgbe_mac_82599EB:
b93a2226 1688 case ixgbe_mac_X540:
f8d1dcaf 1689 /*
b93a2226 1690 * 82599 and X540 can support a value of zero, so allow it for
f8d1dcaf
JB
1691 * max interrupt rate, but there is an erratum where it
1692 * cannot be zero with RSC
1693 */
1694 if (itr_reg == 8 &&
1695 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
1696 itr_reg = 0;
1697
509ee935
JB
1698 /*
1699 * set the WDIS bit to not clear the timer bits and cause an
1700 * immediate assertion of the interrupt
1701 */
1702 itr_reg |= IXGBE_EITR_CNT_WDIS;
bd508178
AD
1703 break;
1704 default:
1705 break;
509ee935
JB
1706 }
1707 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1708}
1709
f494e8fa
AV
1710static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1711{
1712 struct ixgbe_adapter *adapter = q_vector->adapter;
125601bf 1713 int i, r_idx;
f494e8fa
AV
1714 u32 new_itr;
1715 u8 current_itr, ret_itr;
f494e8fa
AV
1716
1717 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1718 for (i = 0; i < q_vector->txr_count; i++) {
125601bf 1719 struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
f494e8fa 1720 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
e8e9f696
JP
1721 q_vector->tx_itr,
1722 tx_ring->total_packets,
1723 tx_ring->total_bytes);
f494e8fa
AV
1724 /* if the result for this queue would decrease interrupt
1725 * rate for this vector then use that result */
30efa5a3 1726 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
e8e9f696 1727 q_vector->tx_itr - 1 : ret_itr);
f494e8fa 1728 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
e8e9f696 1729 r_idx + 1);
f494e8fa
AV
1730 }
1731
1732 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1733 for (i = 0; i < q_vector->rxr_count; i++) {
125601bf 1734 struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
f494e8fa 1735 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
e8e9f696
JP
1736 q_vector->rx_itr,
1737 rx_ring->total_packets,
1738 rx_ring->total_bytes);
f494e8fa
AV
1739 /* if the result for this queue would decrease interrupt
1740 * rate for this vector then use that result */
30efa5a3 1741 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
e8e9f696 1742 q_vector->rx_itr - 1 : ret_itr);
f494e8fa 1743 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
e8e9f696 1744 r_idx + 1);
f494e8fa
AV
1745 }
1746
30efa5a3 1747 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
f494e8fa
AV
1748
1749 switch (current_itr) {
1750 /* counts and packets in update_itr are dependent on these numbers */
1751 case lowest_latency:
1752 new_itr = 100000;
1753 break;
1754 case low_latency:
1755 new_itr = 20000; /* aka hwitr = ~200 */
1756 break;
1757 case bulk_latency:
1758 default:
1759 new_itr = 8000;
1760 break;
1761 }
1762
1763 if (new_itr != q_vector->eitr) {
fe49f04a 1764 /* do an exponential smoothing */
125601bf 1765 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
509ee935
JB
1766
1767 /* save the algorithm value here, not the smoothed one */
1768 q_vector->eitr = new_itr;
fe49f04a
AD
1769
1770 ixgbe_write_eitr(q_vector);
f494e8fa 1771 }
f494e8fa
AV
1772}
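
/*
 * Illustrative user-space sketch, not part of the driver: the exponential
 * smoothing above only moves eitr 10% of the way toward the new target on
 * each update, which damps oscillation between latency classes. Ramping
 * from 8000 ints/s toward a 100000 ints/s target:
 */
#include <stdio.h>

int main(void)
{
	unsigned int eitr = 8000;
	const unsigned int target = 100000;
	int step;

	for (step = 1; step <= 5; step++) {
		eitr = ((eitr * 9) + target) / 10;
		printf("step %d: %u ints/s\n", step, eitr);
	}
	/* prints 17200, 25480, 32932, 39638, 45674 */
	return 0;
}
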
1773
119fc60a 1774/**
f0f9778d
AD
1775 * ixgbe_check_overtemp_subtask - check for over temperature
1776 * @adapter: pointer to adapter
119fc60a 1777 **/
f0f9778d 1778static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
119fc60a 1779{
119fc60a
MC
1780 struct ixgbe_hw *hw = &adapter->hw;
1781 u32 eicr = adapter->interrupt_event;
1782
f0f9778d 1783 if (test_bit(__IXGBE_DOWN, &adapter->state))
7ca647bd
JP
1784 return;
1785
f0f9778d
AD
1786 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1787 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
1788 return;
1789
1790 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
1791
7ca647bd 1792 switch (hw->device_id) {
f0f9778d
AD
1793 case IXGBE_DEV_ID_82599_T3_LOM:
1794 /*
1795 * Since the warning interrupt is shared by both ports,
1796 * and since we may have missed it entirely, we cannot
1797 * rely on SDP0 alone:
1798 * - ignore the event only if neither SDP0 nor an LSC
1799 * was signalled
1800 */
1801 if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
1802 !(eicr & IXGBE_EICR_LSC))
1803 return;
1804
1805 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
1806 u32 autoneg;
1807 bool link_up = false;
7ca647bd 1808
7ca647bd
JP
1809 hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1810
f0f9778d
AD
1811 if (link_up)
1812 return;
1813 }
1814
1815 /* Check if this is not due to overtemp */
1816 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
1817 return;
1818
1819 break;
7ca647bd
JP
1820 default:
1821 if (!(eicr & IXGBE_EICR_GPI_SDP0))
119fc60a 1822 return;
7ca647bd 1823 break;
119fc60a 1824 }
7ca647bd
JP
1825 e_crit(drv,
1826 "Network adapter has been stopped because it has over heated. "
1827 "Restart the computer. If the problem persists, "
1828 "power off the system and replace the adapter\n");
f0f9778d
AD
1829
1830 adapter->interrupt_event = 0;
119fc60a
MC
1831}
1832
0befdb3e
JB
1833static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1834{
1835 struct ixgbe_hw *hw = &adapter->hw;
1836
1837 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
1838 (eicr & IXGBE_EICR_GPI_SDP1)) {
396e799c 1839 e_crit(probe, "Fan has stopped, replace the adapter\n");
0befdb3e
JB
1840 /* write to clear the interrupt */
1841 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1842 }
1843}
cf8280ee 1844
e8e26350
PW
1845static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1846{
1847 struct ixgbe_hw *hw = &adapter->hw;
1848
73c4b7cd
AD
1849 if (eicr & IXGBE_EICR_GPI_SDP2) {
1850 /* Clear the interrupt */
1851 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
7086400d
AD
1852 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1853 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
1854 ixgbe_service_event_schedule(adapter);
1855 }
73c4b7cd
AD
1856 }
1857
e8e26350
PW
1858 if (eicr & IXGBE_EICR_GPI_SDP1) {
1859 /* Clear the interrupt */
1860 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
7086400d
AD
1861 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1862 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
1863 ixgbe_service_event_schedule(adapter);
1864 }
e8e26350
PW
1865 }
1866}
1867
cf8280ee
JB
1868static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1869{
1870 struct ixgbe_hw *hw = &adapter->hw;
1871
1872 adapter->lsc_int++;
1873 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1874 adapter->link_check_timeout = jiffies;
1875 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1876 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
8a0717f3 1877 IXGBE_WRITE_FLUSH(hw);
93c52dd0 1878 ixgbe_service_event_schedule(adapter);
cf8280ee
JB
1879 }
1880}
1881
9a799d71
AK
1882static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1883{
1884 struct net_device *netdev = data;
1885 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1886 struct ixgbe_hw *hw = &adapter->hw;
54037505
DS
1887 u32 eicr;
1888
1889 /*
1890 * Workaround for Silicon errata. Use clear-by-write instead
1891 * of clear-by-read. Reading with EICS will return the
1892 * interrupt causes without clearing, which will later be done
1893 * with the write to EICR.
1894 */
1895 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1896 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
9a799d71 1897
cf8280ee
JB
1898 if (eicr & IXGBE_EICR_LSC)
1899 ixgbe_check_lsc(adapter);
d4f80882 1900
1cdd1ec8
GR
1901 if (eicr & IXGBE_EICR_MAILBOX)
1902 ixgbe_msg_task(adapter);
1903
bd508178
AD
1904 switch (hw->mac.type) {
1905 case ixgbe_mac_82599EB:
b93a2226 1906 case ixgbe_mac_X540:
c4cf55e5
PWJ
1907 /* Handle Flow Director Full threshold interrupt */
1908 if (eicr & IXGBE_EICR_FLOW_DIR) {
d034acf1 1909 int reinit_count = 0;
c4cf55e5 1910 int i;
c4cf55e5 1911 for (i = 0; i < adapter->num_tx_queues; i++) {
d034acf1 1912 struct ixgbe_ring *ring = adapter->tx_ring[i];
7d637bcc 1913 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
d034acf1
AD
1914 &ring->state))
1915 reinit_count++;
1916 }
1917 if (reinit_count) {
1918 /* no more flow director interrupts until after init */
1919 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
1920 eicr &= ~IXGBE_EICR_FLOW_DIR;
1921 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
1922 ixgbe_service_event_schedule(adapter);
c4cf55e5
PWJ
1923 }
1924 }
f0f9778d
AD
1925 ixgbe_check_sfp_event(adapter, eicr);
1926 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1927 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1928 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1929 adapter->interrupt_event = eicr;
1930 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
1931 ixgbe_service_event_schedule(adapter);
c4cf55e5
PWJ
1932 }
1933 }
bd508178
AD
1934 break;
1935 default:
1936 break;
c4cf55e5 1937 }
bd508178
AD
1938
1939 ixgbe_check_fan_failure(adapter, eicr);
1940
7086400d 1941 /* re-enable the original interrupt state, no lsc, no queues */
d4f80882 1942 if (!test_bit(__IXGBE_DOWN, &adapter->state))
7086400d
AD
1943 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr &
1944 ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE));
9a799d71
AK
1945
1946 return IRQ_HANDLED;
1947}
1948
fe49f04a
AD
1949static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1950 u64 qmask)
1951{
1952 u32 mask;
bd508178 1953 struct ixgbe_hw *hw = &adapter->hw;
fe49f04a 1954
bd508178
AD
1955 switch (hw->mac.type) {
1956 case ixgbe_mac_82598EB:
fe49f04a 1957 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
bd508178
AD
1958 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1959 break;
1960 case ixgbe_mac_82599EB:
b93a2226 1961 case ixgbe_mac_X540:
fe49f04a 1962 mask = (qmask & 0xFFFFFFFF);
bd508178
AD
1963 if (mask)
1964 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
fe49f04a 1965 mask = (qmask >> 32);
bd508178
AD
1966 if (mask)
1967 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1968 break;
1969 default:
1970 break;
fe49f04a
AD
1971 }
1972 /* skip the flush */
1973}
1974
1975static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
e8e9f696 1976 u64 qmask)
fe49f04a
AD
1977{
1978 u32 mask;
bd508178 1979 struct ixgbe_hw *hw = &adapter->hw;
fe49f04a 1980
bd508178
AD
1981 switch (hw->mac.type) {
1982 case ixgbe_mac_82598EB:
fe49f04a 1983 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
bd508178
AD
1984 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1985 break;
1986 case ixgbe_mac_82599EB:
b93a2226 1987 case ixgbe_mac_X540:
fe49f04a 1988 mask = (qmask & 0xFFFFFFFF);
bd508178
AD
1989 if (mask)
1990 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
fe49f04a 1991 mask = (qmask >> 32);
bd508178
AD
1992 if (mask)
1993 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1994 break;
1995 default:
1996 break;
fe49f04a
AD
1997 }
1998 /* skip the flush */
1999}
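
/*
 * Illustrative user-space sketch, not part of the driver: on 82599/X540
 * the 64-bit queue mask used by the enable/disable helpers above is split
 * across the EIMS_EX/EIMC_EX register pair, low word first:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t qmask = (uint64_t)1 << 40;	/* vector 40 */
	uint32_t lo = qmask & 0xFFFFFFFF;	/* goes to *_EX(0) */
	uint32_t hi = qmask >> 32;		/* goes to *_EX(1) */

	printf("lo=0x%08x hi=0x%08x\n", lo, hi);	/* lo=0, hi=0x100 */
	return 0;
}
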
2000
9a799d71
AK
2001static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
2002{
021230d4
AV
2003 struct ixgbe_q_vector *q_vector = data;
2004 struct ixgbe_adapter *adapter = q_vector->adapter;
3a581073 2005 struct ixgbe_ring *tx_ring;
021230d4
AV
2006 int i, r_idx;
2007
2008 if (!q_vector->txr_count)
2009 return IRQ_HANDLED;
2010
2011 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2012 for (i = 0; i < q_vector->txr_count; i++) {
4a0b9ca0 2013 tx_ring = adapter->tx_ring[r_idx];
3a581073
JB
2014 tx_ring->total_bytes = 0;
2015 tx_ring->total_packets = 0;
021230d4 2016 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
e8e9f696 2017 r_idx + 1);
021230d4 2018 }
9a799d71 2019
9b471446 2020 /* EIAM disabled interrupts (on this vector) for us */
91281fd3
AD
2021 napi_schedule(&q_vector->napi);
2022
9a799d71
AK
2023 return IRQ_HANDLED;
2024}
2025
021230d4
AV
2026/**
2027 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
2028 * @irq: unused
2029 * @data: pointer to our q_vector struct for this interrupt vector
2030 **/
9a799d71
AK
2031static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
2032{
021230d4
AV
2033 struct ixgbe_q_vector *q_vector = data;
2034 struct ixgbe_adapter *adapter = q_vector->adapter;
3a581073 2035 struct ixgbe_ring *rx_ring;
021230d4 2036 int r_idx;
30efa5a3 2037 int i;
021230d4 2038
33cf09c9
AD
2039#ifdef CONFIG_IXGBE_DCA
2040 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2041 ixgbe_update_dca(q_vector);
2042#endif
2043
021230d4 2044 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
33cf09c9 2045 for (i = 0; i < q_vector->rxr_count; i++) {
4a0b9ca0 2046 rx_ring = adapter->rx_ring[r_idx];
30efa5a3
JB
2047 rx_ring->total_bytes = 0;
2048 rx_ring->total_packets = 0;
2049 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
e8e9f696 2050 r_idx + 1);
30efa5a3
JB
2051 }
2052
021230d4
AV
2053 if (!q_vector->rxr_count)
2054 return IRQ_HANDLED;
2055
9b471446 2056 /* EIAM disabled interrupts (on this vector) for us */
288379f0 2057 napi_schedule(&q_vector->napi);
021230d4
AV
2058
2059 return IRQ_HANDLED;
2060}
2061
2062static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
2063{
91281fd3
AD
2064 struct ixgbe_q_vector *q_vector = data;
2065 struct ixgbe_adapter *adapter = q_vector->adapter;
2066 struct ixgbe_ring *ring;
2067 int r_idx;
2068 int i;
2069
2070 if (!q_vector->txr_count && !q_vector->rxr_count)
2071 return IRQ_HANDLED;
2072
2073 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2074 for (i = 0; i < q_vector->txr_count; i++) {
4a0b9ca0 2075 ring = adapter->tx_ring[r_idx];
91281fd3
AD
2076 ring->total_bytes = 0;
2077 ring->total_packets = 0;
2078 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
e8e9f696 2079 r_idx + 1);
91281fd3
AD
2080 }
2081
2082 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2083 for (i = 0; i < q_vector->rxr_count; i++) {
4a0b9ca0 2084 ring = adapter->rx_ring[r_idx];
91281fd3
AD
2085 ring->total_bytes = 0;
2086 ring->total_packets = 0;
2087 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
e8e9f696 2088 r_idx + 1);
91281fd3
AD
2089 }
2090
9b471446 2091 /* EIAM disabled interrupts (on this vector) for us */
91281fd3 2092 napi_schedule(&q_vector->napi);
9a799d71 2093
9a799d71
AK
2094 return IRQ_HANDLED;
2095}
2096
021230d4
AV
2097/**
2098 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
2099 * @napi: napi struct with our devices info in it
2100 * @budget: amount of work driver is allowed to do this pass, in packets
2101 *
f0848276
JB
2102 * This function is optimized for cleaning one queue only on a single
2103 * q_vector!!!
021230d4 2104 **/
9a799d71
AK
2105static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
2106{
021230d4 2107 struct ixgbe_q_vector *q_vector =
e8e9f696 2108 container_of(napi, struct ixgbe_q_vector, napi);
021230d4 2109 struct ixgbe_adapter *adapter = q_vector->adapter;
f0848276 2110 struct ixgbe_ring *rx_ring = NULL;
9a799d71 2111 int work_done = 0;
021230d4 2112 long r_idx;
9a799d71 2113
5dd2d332 2114#ifdef CONFIG_IXGBE_DCA
bd0362dd 2115 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
33cf09c9 2116 ixgbe_update_dca(q_vector);
bd0362dd 2117#endif
9a799d71 2118
33cf09c9
AD
2119 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2120 rx_ring = adapter->rx_ring[r_idx];
2121
78b6f4ce 2122 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
9a799d71 2123
021230d4
AV
2124 /* If all Rx work done, exit the polling mode */
2125 if (work_done < budget) {
288379f0 2126 napi_complete(napi);
f7554a2b 2127 if (adapter->rx_itr_setting & 1)
f494e8fa 2128 ixgbe_set_itr_msix(q_vector);
9a799d71 2129 if (!test_bit(__IXGBE_DOWN, &adapter->state))
fe49f04a 2130 ixgbe_irq_enable_queues(adapter,
e8e9f696 2131 ((u64)1 << q_vector->v_idx));
9a799d71
AK
2132 }
2133
2134 return work_done;
2135}
2136
f0848276 2137/**
91281fd3 2138 * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
f0848276
JB
2139 * @napi: napi struct with our devices info in it
2140 * @budget: amount of work driver is allowed to do this pass, in packets
2141 *
2142 * This function will clean more than one rx/tx queue associated with a
2143 * q_vector.
2144 **/
91281fd3 2145static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
f0848276
JB
2146{
2147 struct ixgbe_q_vector *q_vector =
e8e9f696 2148 container_of(napi, struct ixgbe_q_vector, napi);
f0848276 2149 struct ixgbe_adapter *adapter = q_vector->adapter;
91281fd3 2150 struct ixgbe_ring *ring = NULL;
f0848276
JB
2151 int work_done = 0, i;
2152 long r_idx;
91281fd3
AD
2153 bool tx_clean_complete = true;
2154
33cf09c9
AD
2155#ifdef CONFIG_IXGBE_DCA
2156 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2157 ixgbe_update_dca(q_vector);
2158#endif
2159
91281fd3
AD
2160 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2161 for (i = 0; i < q_vector->txr_count; i++) {
4a0b9ca0 2162 ring = adapter->tx_ring[r_idx];
91281fd3
AD
2163 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
2164 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
e8e9f696 2165 r_idx + 1);
91281fd3 2166 }
f0848276
JB
2167
2168 /* attempt to distribute budget to each queue fairly, but don't allow
2169 * the budget to go below 1 because we'll exit polling */
2170 budget /= (q_vector->rxr_count ?: 1);
2171 budget = max(budget, 1);
2172 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2173 for (i = 0; i < q_vector->rxr_count; i++) {
4a0b9ca0 2174 ring = adapter->rx_ring[r_idx];
91281fd3 2175 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
f0848276 2176 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
e8e9f696 2177 r_idx + 1);
f0848276
JB
2178 }
2179
2180 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
4a0b9ca0 2181 ring = adapter->rx_ring[r_idx];
f0848276 2182 /* If all Rx work done, exit the polling mode */
7f821875 2183 if (work_done < budget) {
288379f0 2184 napi_complete(napi);
f7554a2b 2185 if (adapter->rx_itr_setting & 1)
f0848276
JB
2186 ixgbe_set_itr_msix(q_vector);
2187 if (!test_bit(__IXGBE_DOWN, &adapter->state))
fe49f04a 2188 ixgbe_irq_enable_queues(adapter,
e8e9f696 2189 ((u64)1 << q_vector->v_idx));
f0848276
JB
2190 return 0;
2191 }
2192
2193 return work_done;
2194}
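
/*
 * Illustrative user-space sketch, not part of the driver: the per-queue
 * budget split above relies on the GNU "?:" extension to guard against a
 * zero rx count, then clamps at one packet so polling can still make
 * progress. With the default NAPI budget of 64 and three rx queues:
 */
#include <stdio.h>

int main(void)
{
	int budget = 64;
	int rxr_count = 3;

	budget /= (rxr_count ?: 1);	/* 64 / 3 = 21 packets per queue */
	if (budget < 1)
		budget = 1;		/* never below one packet */
	printf("per-queue budget: %d\n", budget);
	return 0;
}
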
91281fd3
AD
2195
2196/**
2197 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
2198 * @napi: napi struct with our devices info in it
2199 * @budget: amount of work driver is allowed to do this pass, in packets
2200 *
2201 * This function is optimized for cleaning one queue only on a single
2202 * q_vector!!!
2203 **/
2204static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2205{
2206 struct ixgbe_q_vector *q_vector =
e8e9f696 2207 container_of(napi, struct ixgbe_q_vector, napi);
91281fd3
AD
2208 struct ixgbe_adapter *adapter = q_vector->adapter;
2209 struct ixgbe_ring *tx_ring = NULL;
2210 int work_done = 0;
2211 long r_idx;
2212
91281fd3
AD
2213#ifdef CONFIG_IXGBE_DCA
2214 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
33cf09c9 2215 ixgbe_update_dca(q_vector);
91281fd3
AD
2216#endif
2217
33cf09c9
AD
2218 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2219 tx_ring = adapter->tx_ring[r_idx];
2220
91281fd3
AD
2221 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2222 work_done = budget;
2223
f7554a2b 2224 /* If all Tx work done, exit the polling mode */
91281fd3
AD
2225 if (work_done < budget) {
2226 napi_complete(napi);
f7554a2b 2227 if (adapter->tx_itr_setting & 1)
91281fd3
AD
2228 ixgbe_set_itr_msix(q_vector);
2229 if (!test_bit(__IXGBE_DOWN, &adapter->state))
e8e9f696
JP
2230 ixgbe_irq_enable_queues(adapter,
2231 ((u64)1 << q_vector->v_idx));
91281fd3
AD
2232 }
2233
2234 return work_done;
2235}
2236
021230d4 2237static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
e8e9f696 2238 int r_idx)
021230d4 2239{
7a921c93 2240 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2274543f 2241 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
7a921c93
AD
2242
2243 set_bit(r_idx, q_vector->rxr_idx);
2244 q_vector->rxr_count++;
2274543f 2245 rx_ring->q_vector = q_vector;
021230d4
AV
2246}
2247
2248static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
e8e9f696 2249 int t_idx)
021230d4 2250{
7a921c93 2251 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2274543f 2252 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
7a921c93
AD
2253
2254 set_bit(t_idx, q_vector->txr_idx);
2255 q_vector->txr_count++;
2274543f 2256 tx_ring->q_vector = q_vector;
021230d4
AV
2257}
2258
9a799d71 2259/**
021230d4
AV
2260 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2261 * @adapter: board private structure to initialize
9a799d71 2262 *
021230d4
AV
2263 * This function maps descriptor rings to the queue-specific vectors
2264 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2265 * one vector per ring/queue, but on a constrained vector budget, we
2266 * group the rings as "efficiently" as possible. You would add new
2267 * mapping configurations in here.
9a799d71 2268 **/
d0759ebb 2269static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
021230d4 2270{
d0759ebb 2271 int q_vectors;
021230d4
AV
2272 int v_start = 0;
2273 int rxr_idx = 0, txr_idx = 0;
2274 int rxr_remaining = adapter->num_rx_queues;
2275 int txr_remaining = adapter->num_tx_queues;
2276 int i, j;
2277 int rqpv, tqpv;
2278 int err = 0;
2279
2280 /* No mapping required if MSI-X is disabled. */
2281 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2282 goto out;
9a799d71 2283
d0759ebb
AD
2284 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2285
021230d4
AV
2286 /*
2287 * The ideal configuration...
2288 * We have enough vectors to map one per queue.
2289 */
d0759ebb 2290 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
021230d4
AV
2291 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2292 map_vector_to_rxq(adapter, v_start, rxr_idx);
9a799d71 2293
021230d4
AV
2294 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
2295 map_vector_to_txq(adapter, v_start, txr_idx);
9a799d71 2296
9a799d71 2297 goto out;
021230d4 2298 }
9a799d71 2299
021230d4
AV
2300 /*
2301 * If we don't have enough vectors for a 1-to-1
2302 * mapping, we'll have to group them so there are
2303 * multiple queues per vector.
2304 */
2305 /* Re-adjusting *qpv takes care of the remainder. */
d0759ebb
AD
2306 for (i = v_start; i < q_vectors; i++) {
2307 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
021230d4
AV
2308 for (j = 0; j < rqpv; j++) {
2309 map_vector_to_rxq(adapter, i, rxr_idx);
2310 rxr_idx++;
2311 rxr_remaining--;
2312 }
d0759ebb 2313 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
021230d4
AV
2314 for (j = 0; j < tqpv; j++) {
2315 map_vector_to_txq(adapter, i, txr_idx);
2316 txr_idx++;
2317 txr_remaining--;
9a799d71 2318 }
9a799d71 2319 }
021230d4
AV
2320out:
2321 return err;
2322}
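
/*
 * Illustrative user-space sketch, not part of the driver: how the
 * DIV_ROUND_UP re-balancing above spreads queues when vectors are scarce.
 * Ten rx queues over four vectors come out 3/3/2/2:
 */
#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int rxr_remaining = 10;
	int q_vectors = 4;
	int i;

	for (i = 0; i < q_vectors; i++) {
		int rqpv = DEMO_DIV_ROUND_UP(rxr_remaining, q_vectors - i);

		printf("vector %d gets %d rx queues\n", i, rqpv);
		rxr_remaining -= rqpv;
	}
	return 0;
}
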
2323
2324/**
2325 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2326 * @adapter: board private structure
2327 *
2328 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2329 * interrupts from the kernel.
2330 **/
2331static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2332{
2333 struct net_device *netdev = adapter->netdev;
2334 irqreturn_t (*handler)(int, void *);
2335 int i, vector, q_vectors, err;
e8e9f696 2336 int ri = 0, ti = 0;
021230d4
AV
2337
2338 /* Decrement for Other and TCP Timer vectors */
2339 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2340
d0759ebb 2341 err = ixgbe_map_rings_to_vectors(adapter);
021230d4 2342 if (err)
d0759ebb 2343 return err;
021230d4 2344
d0759ebb
AD
2345#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
2346 ? &ixgbe_msix_clean_many : \
2347 (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
2348 (_v)->txr_count ? &ixgbe_msix_clean_tx : \
2349 NULL)
021230d4 2350 for (vector = 0; vector < q_vectors; vector++) {
d0759ebb
AD
2351 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2352 handler = SET_HANDLER(q_vector);
cb13fc20 2353
e8e9f696 2354 if (handler == &ixgbe_msix_clean_rx) {
9fe93afd
DS
2355 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2356 "%s-%s-%d", netdev->name, "rx", ri++);
e8e9f696 2357 } else if (handler == &ixgbe_msix_clean_tx) {
9fe93afd
DS
2358 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2359 "%s-%s-%d", netdev->name, "tx", ti++);
d0759ebb 2360 } else if (handler == &ixgbe_msix_clean_many) {
9fe93afd
DS
2361 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2362 "%s-%s-%d", netdev->name, "TxRx", ri++);
32aa77a4 2363 ti++;
d0759ebb
AD
2364 } else {
2365 /* skip this unused q_vector */
2366 continue;
32aa77a4 2367 }
021230d4 2368 err = request_irq(adapter->msix_entries[vector].vector,
d0759ebb
AD
2369 handler, 0, q_vector->name,
2370 q_vector);
9a799d71 2371 if (err) {
396e799c 2372 e_err(probe, "request_irq failed for MSIX interrupt. "
849c4542 2373 "Error: %d\n", err);
021230d4 2374 goto free_queue_irqs;
9a799d71 2375 }
9a799d71
AK
2376 }
2377
d0759ebb 2378 sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
021230d4 2379 err = request_irq(adapter->msix_entries[vector].vector,
d0759ebb 2380 ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
9a799d71 2381 if (err) {
396e799c 2382 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
021230d4 2383 goto free_queue_irqs;
9a799d71
AK
2384 }
2385
9a799d71
AK
2386 return 0;
2387
021230d4
AV
2388free_queue_irqs:
2389 for (i = vector - 1; i >= 0; i--)
2390 free_irq(adapter->msix_entries[--vector].vector,
e8e9f696 2391 adapter->q_vector[i]);
021230d4
AV
2392 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2393 pci_disable_msix(adapter->pdev);
9a799d71
AK
2394 kfree(adapter->msix_entries);
2395 adapter->msix_entries = NULL;
9a799d71
AK
2396 return err;
2397}
2398
f494e8fa
AV
2399static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2400{
7a921c93 2401 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
4a0b9ca0
PW
2402 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2403 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
125601bf
AD
2404 u32 new_itr = q_vector->eitr;
2405 u8 current_itr;
f494e8fa 2406
30efa5a3 2407 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
e8e9f696
JP
2408 q_vector->tx_itr,
2409 tx_ring->total_packets,
2410 tx_ring->total_bytes);
30efa5a3 2411 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
e8e9f696
JP
2412 q_vector->rx_itr,
2413 rx_ring->total_packets,
2414 rx_ring->total_bytes);
f494e8fa 2415
30efa5a3 2416 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
f494e8fa
AV
2417
2418 switch (current_itr) {
2419 /* counts and packets in update_itr are dependent on these numbers */
2420 case lowest_latency:
2421 new_itr = 100000;
2422 break;
2423 case low_latency:
2424 new_itr = 20000; /* aka hwitr = ~200 */
2425 break;
2426 case bulk_latency:
2427 new_itr = 8000;
2428 break;
2429 default:
2430 break;
2431 }
2432
2433 if (new_itr != q_vector->eitr) {
fe49f04a 2434 /* do an exponential smoothing */
125601bf 2435 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
509ee935 2436
125601bf 2437 /* save the algorithm value here */
509ee935 2438 q_vector->eitr = new_itr;
fe49f04a
AD
2439
2440 ixgbe_write_eitr(q_vector);
f494e8fa 2441 }
f494e8fa
AV
2442}
2443
79aefa45
AD
2444/**
2445 * ixgbe_irq_enable - Enable default interrupt generation settings
2446 * @adapter: board private structure
2447 **/
6af3b9eb
ET
2448static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2449 bool flush)
79aefa45
AD
2450{
2451 u32 mask;
835462fc
NS
2452
2453 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
119fc60a
MC
2454 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
2455 mask |= IXGBE_EIMS_GPI_SDP0;
6ab33d51
DM
2456 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2457 mask |= IXGBE_EIMS_GPI_SDP1;
bd508178
AD
2458 switch (adapter->hw.mac.type) {
2459 case ixgbe_mac_82599EB:
b93a2226 2460 case ixgbe_mac_X540:
2a41ff81 2461 mask |= IXGBE_EIMS_ECC;
e8e26350
PW
2462 mask |= IXGBE_EIMS_GPI_SDP1;
2463 mask |= IXGBE_EIMS_GPI_SDP2;
1cdd1ec8
GR
2464 if (adapter->num_vfs)
2465 mask |= IXGBE_EIMS_MAILBOX;
bd508178
AD
2466 break;
2467 default:
2468 break;
e8e26350 2469 }
03ecf91a 2470 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
c4cf55e5 2471 mask |= IXGBE_EIMS_FLOW_DIR;
e8e26350 2472
79aefa45 2473 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
6af3b9eb
ET
2474 if (queues)
2475 ixgbe_irq_enable_queues(adapter, ~0);
2476 if (flush)
2477 IXGBE_WRITE_FLUSH(&adapter->hw);
1cdd1ec8
GR
2478
2479 if (adapter->num_vfs > 32) {
2480 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
2481 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
2482 }
79aefa45 2483}
021230d4 2484
9a799d71 2485/**
021230d4 2486 * ixgbe_intr - legacy mode Interrupt Handler
9a799d71
AK
2487 * @irq: interrupt number
2488 * @data: pointer to a network interface device structure
9a799d71
AK
2489 **/
2490static irqreturn_t ixgbe_intr(int irq, void *data)
2491{
2492 struct net_device *netdev = data;
2493 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2494 struct ixgbe_hw *hw = &adapter->hw;
7a921c93 2495 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
9a799d71
AK
2496 u32 eicr;
2497
54037505 2498 /*
6af3b9eb 2499 * Workaround for silicon errata on 82598. Mask the interrupts
54037505
DS
2500 * before the read of EICR.
2501 */
2502 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2503
021230d4
AV
2504 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
2505 * therefore no explicit interrupt disable is necessary */
2506 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
f47cf66e 2507 if (!eicr) {
6af3b9eb
ET
2508 /*
2509 * shared interrupt alert!
f47cf66e 2510 * make sure interrupts are enabled because the read will
6af3b9eb
ET
2511 * have disabled interrupts due to EIAM. Finish the
2512 * workaround of the silicon errata on 82598 by unmasking
2513 * the interrupt that we masked before the EICR read.
2514 */
2515 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2516 ixgbe_irq_enable(adapter, true, true);
9a799d71 2517 return IRQ_NONE; /* Not our interrupt */
f47cf66e 2518 }
9a799d71 2519
cf8280ee
JB
2520 if (eicr & IXGBE_EICR_LSC)
2521 ixgbe_check_lsc(adapter);
021230d4 2522
bd508178
AD
2523 switch (hw->mac.type) {
2524 case ixgbe_mac_82599EB:
e8e26350 2525 ixgbe_check_sfp_event(adapter, eicr);
bd508178
AD
2526 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2527 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
f0f9778d
AD
2528 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2529 adapter->interrupt_event = eicr;
2530 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
2531 ixgbe_service_event_schedule(adapter);
2532 }
bd508178
AD
2533 }
2534 break;
2535 default:
2536 break;
2537 }
e8e26350 2538
0befdb3e
JB
2539 ixgbe_check_fan_failure(adapter, eicr);
2540
7a921c93 2541 if (napi_schedule_prep(&(q_vector->napi))) {
4a0b9ca0
PW
2542 adapter->tx_ring[0]->total_packets = 0;
2543 adapter->tx_ring[0]->total_bytes = 0;
2544 adapter->rx_ring[0]->total_packets = 0;
2545 adapter->rx_ring[0]->total_bytes = 0;
021230d4 2546 /* would disable interrupts here but EIAM disabled it */
7a921c93 2547 __napi_schedule(&(q_vector->napi));
9a799d71
AK
2548 }
2549
6af3b9eb
ET
2550 /*
2551 * re-enable link (maybe) and non-queue interrupts, no flush.
2552 * ixgbe_poll will re-enable the queue interrupts
2553 */
2554
2555 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2556 ixgbe_irq_enable(adapter, false, false);
2557
9a799d71
AK
2558 return IRQ_HANDLED;
2559}
2560
021230d4
AV
2561static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
2562{
2563 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2564
2565 for (i = 0; i < q_vectors; i++) {
7a921c93 2566 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
021230d4
AV
2567 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
2568 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
2569 q_vector->rxr_count = 0;
2570 q_vector->txr_count = 0;
2571 }
2572}
2573
9a799d71
AK
2574/**
2575 * ixgbe_request_irq - initialize interrupts
2576 * @adapter: board private structure
2577 *
2578 * Attempts to configure interrupts using the best available
2579 * capabilities of the hardware and kernel.
2580 **/
021230d4 2581static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
9a799d71
AK
2582{
2583 struct net_device *netdev = adapter->netdev;
021230d4 2584 int err;
9a799d71 2585
021230d4
AV
2586 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2587 err = ixgbe_request_msix_irqs(adapter);
2588 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
a0607fd3 2589 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
e8e9f696 2590 netdev->name, netdev);
021230d4 2591 } else {
a0607fd3 2592 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
e8e9f696 2593 netdev->name, netdev);
9a799d71
AK
2594 }
2595
9a799d71 2596 if (err)
396e799c 2597 e_err(probe, "request_irq failed, Error %d\n", err);
9a799d71 2598
9a799d71
AK
2599 return err;
2600}
2601
2602static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2603{
2604 struct net_device *netdev = adapter->netdev;
2605
2606 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
021230d4 2607 int i, q_vectors;
9a799d71 2608
021230d4
AV
2609 q_vectors = adapter->num_msix_vectors;
2610
2611 i = q_vectors - 1;
9a799d71 2612 free_irq(adapter->msix_entries[i].vector, netdev);
9a799d71 2613
021230d4
AV
2614 i--;
2615 for (; i >= 0; i--) {
894ff7cf
AD
2616 /* free only the irqs that were actually requested */
2617 if (!adapter->q_vector[i]->rxr_count &&
2618 !adapter->q_vector[i]->txr_count)
2619 continue;
2620
021230d4 2621 free_irq(adapter->msix_entries[i].vector,
e8e9f696 2622 adapter->q_vector[i]);
021230d4
AV
2623 }
2624
2625 ixgbe_reset_q_vectors(adapter);
2626 } else {
2627 free_irq(adapter->pdev->irq, netdev);
9a799d71
AK
2628 }
2629}
2630
22d5a71b
JB
2631/**
2632 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
2633 * @adapter: board private structure
2634 **/
2635static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2636{
bd508178
AD
2637 switch (adapter->hw.mac.type) {
2638 case ixgbe_mac_82598EB:
835462fc 2639 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
bd508178
AD
2640 break;
2641 case ixgbe_mac_82599EB:
b93a2226 2642 case ixgbe_mac_X540:
835462fc
NS
2643 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2644 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
22d5a71b 2645 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
1cdd1ec8
GR
2646 if (adapter->num_vfs > 32)
2647 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
bd508178
AD
2648 break;
2649 default:
2650 break;
22d5a71b
JB
2651 }
2652 IXGBE_WRITE_FLUSH(&adapter->hw);
2653 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2654 int i;
2655 for (i = 0; i < adapter->num_msix_vectors; i++)
2656 synchronize_irq(adapter->msix_entries[i].vector);
2657 } else {
2658 synchronize_irq(adapter->pdev->irq);
2659 }
2660}
2661
9a799d71
AK
2662/**
2663 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2664 *
2665 **/
2666static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2667{
9a799d71
AK
2668 struct ixgbe_hw *hw = &adapter->hw;
2669
021230d4 2670 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
e8e9f696 2671 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
9a799d71 2672
e8e26350
PW
2673 ixgbe_set_ivar(adapter, 0, 0, 0);
2674 ixgbe_set_ivar(adapter, 1, 0, 0);
021230d4
AV
2675
2676 map_vector_to_rxq(adapter, 0, 0);
2677 map_vector_to_txq(adapter, 0, 0);
2678
396e799c 2679 e_info(hw, "Legacy interrupt IVAR setup done\n");
9a799d71
AK
2680}
2681
43e69bf0
AD
2682/**
2683 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
2684 * @adapter: board private structure
2685 * @ring: structure containing ring specific data
2686 *
2687 * Configure the Tx descriptor ring after a reset.
2688 **/
84418e3b
AD
2689void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2690 struct ixgbe_ring *ring)
43e69bf0
AD
2691{
2692 struct ixgbe_hw *hw = &adapter->hw;
2693 u64 tdba = ring->dma;
2f1860b8
AD
2694 int wait_loop = 10;
2695 u32 txdctl;
bf29ee6c 2696 u8 reg_idx = ring->reg_idx;
43e69bf0 2697
2f1860b8
AD
2698 /* disable queue to avoid issues while updating state */
2699 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2700 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
2701 txdctl & ~IXGBE_TXDCTL_ENABLE);
2702 IXGBE_WRITE_FLUSH(hw);
2703
43e69bf0 2704 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
e8e9f696 2705 (tdba & DMA_BIT_MASK(32)));
43e69bf0
AD
2706 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
2707 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
2708 ring->count * sizeof(union ixgbe_adv_tx_desc));
2709 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2710 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
84ea2591 2711 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
43e69bf0 2712
2f1860b8
AD
2713 /* configure fetching thresholds */
2714 if (adapter->rx_itr_setting == 0) {
2715 /* cannot set wthresh when itr==0 */
2716 txdctl &= ~0x007F0000;
2717 } else {
2718 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2719 txdctl |= (8 << 16);
2720 }
2721 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2722 /* PThresh workaround for Tx hang with DFP enabled. */
2723 txdctl |= 32;
2724 }
2725
2726 /* reinitialize flowdirector state */
ee9e0f0b
AD
2727 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2728 adapter->atr_sample_rate) {
2729 ring->atr_sample_rate = adapter->atr_sample_rate;
2730 ring->atr_count = 0;
2731 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2732 } else {
2733 ring->atr_sample_rate = 0;
2734 }
2f1860b8 2735
c84d324c
JF
2736 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2737
2f1860b8
AD
2738 /* enable queue */
2739 txdctl |= IXGBE_TXDCTL_ENABLE;
2740 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
2741
2742 /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2743 if (hw->mac.type == ixgbe_mac_82598EB &&
2744 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
2745 return;
2746
2747 /* poll to verify queue is enabled */
2748 do {
032b4325 2749 usleep_range(1000, 2000);
2f1860b8
AD
2750 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
2751 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
2752 if (!wait_loop)
2753 e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
43e69bf0
AD
2754}
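
/*
 * Illustrative user-space sketch, not part of the driver: TXDCTL packs the
 * prefetch/host/write-back thresholds into one register. Assuming the
 * usual field layout (PTHRESH in the low bits, WTHRESH at bits 22:16), the
 * two writes above compose like this:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t txdctl = 0;

	txdctl &= ~0x007F0000;	/* clear WTHRESH (bits 22:16) */
	txdctl |= 8 << 16;	/* WTHRESH = 8 for burst writeback */
	txdctl |= 32;		/* PTHRESH = 32 (DCB/DFP workaround) */
	printf("TXDCTL = 0x%08x\n", txdctl);	/* 0x00080020 */
	return 0;
}
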
2755
120ff942
AD
2756static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
2757{
2758 struct ixgbe_hw *hw = &adapter->hw;
2759 u32 rttdcs;
72a32f1f 2760 u32 reg;
8b1c0b24 2761 u8 tcs = netdev_get_num_tc(adapter->netdev);
120ff942
AD
2762
2763 if (hw->mac.type == ixgbe_mac_82598EB)
2764 return;
2765
2766 /* disable the arbiter while setting MTQC */
2767 rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2768 rttdcs |= IXGBE_RTTDCS_ARBDIS;
2769 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2770
2771 /* set transmit pool layout */
8b1c0b24 2772 switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
120ff942
AD
2773 case (IXGBE_FLAG_SRIOV_ENABLED):
2774 IXGBE_WRITE_REG(hw, IXGBE_MTQC,
2775 (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
2776 break;
8b1c0b24
JF
2777 default:
2778 if (!tcs)
2779 reg = IXGBE_MTQC_64Q_1PB;
2780 else if (tcs <= 4)
2781 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
2782 else
2783 reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
120ff942 2784
8b1c0b24 2785 IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
120ff942 2786
8b1c0b24
JF
2787 /* Enable Security TX Buffer IFG for multiple pb */
2788 if (tcs) {
2789 reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
2790 reg |= IXGBE_SECTX_DCB;
2791 IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
2792 }
120ff942
AD
2793 break;
2794 }
2795
2796 /* re-enable the arbiter */
2797 rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2798 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2799}
2800
9a799d71 2801/**
3a581073 2802 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
9a799d71
AK
2803 * @adapter: board private structure
2804 *
2805 * Configure the Tx unit of the MAC after a reset.
2806 **/
2807static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
2808{
2f1860b8
AD
2809 struct ixgbe_hw *hw = &adapter->hw;
2810 u32 dmatxctl;
43e69bf0 2811 u32 i;
9a799d71 2812
2f1860b8
AD
2813 ixgbe_setup_mtqc(adapter);
2814
2815 if (hw->mac.type != ixgbe_mac_82598EB) {
2816 /* DMATXCTL.EN must be before Tx queues are enabled */
2817 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2818 dmatxctl |= IXGBE_DMATXCTL_TE;
2819 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2820 }
2821
9a799d71 2822 /* Setup the HW Tx Head and Tail descriptor pointers */
43e69bf0
AD
2823 for (i = 0; i < adapter->num_tx_queues; i++)
2824 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
9a799d71
AK
2825}
2826
e8e26350 2827#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
cc41ac7c 2828
a6616b42 2829static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
e8e9f696 2830 struct ixgbe_ring *rx_ring)
cc41ac7c 2831{
cc41ac7c 2832 u32 srrctl;
bf29ee6c 2833 u8 reg_idx = rx_ring->reg_idx;
3be1adfb 2834
bd508178
AD
2835 switch (adapter->hw.mac.type) {
2836 case ixgbe_mac_82598EB: {
2837 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2838 const int mask = feature[RING_F_RSS].mask;
bf29ee6c 2839 reg_idx = reg_idx & mask;
cc41ac7c 2840 }
bd508178
AD
2841 break;
2842 case ixgbe_mac_82599EB:
b93a2226 2843 case ixgbe_mac_X540:
bd508178
AD
2844 default:
2845 break;
2846 }
2847
bf29ee6c 2848 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
cc41ac7c
JB
2849
2850 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2851 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
9e10e045
AD
2852 if (adapter->num_vfs)
2853 srrctl |= IXGBE_SRRCTL_DROP_EN;
cc41ac7c 2854
afafd5b0
AD
2855 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2856 IXGBE_SRRCTL_BSIZEHDR_MASK;
2857
7d637bcc 2858 if (ring_is_ps_enabled(rx_ring)) {
afafd5b0
AD
2859#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2860 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2861#else
2862 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2863#endif
cc41ac7c 2864 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
cc41ac7c 2865 } else {
afafd5b0
AD
2866 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
2867 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
cc41ac7c 2868 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
cc41ac7c 2869 }
e8e26350 2870
bf29ee6c 2871 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
cc41ac7c 2872}
9a799d71 2873
05abb126 2874static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
0cefafad 2875{
05abb126
AD
2876 struct ixgbe_hw *hw = &adapter->hw;
2877 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
e8e9f696
JP
2878 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2879 0x6A3E67EA, 0x14364D17, 0x3BED200D};
05abb126
AD
2880 u32 mrqc = 0, reta = 0;
2881 u32 rxcsum;
2882 int i, j;
8b1c0b24 2883 u8 tcs = netdev_get_num_tc(adapter->netdev);
86b4db3b
JF
2884 int maxq = adapter->ring_feature[RING_F_RSS].indices;
2885
2886 if (tcs)
2887 maxq = min(maxq, adapter->num_tx_queues / tcs);
0cefafad 2888
05abb126
AD
2889 /* Fill out hash function seeds */
2890 for (i = 0; i < 10; i++)
2891 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2892
2893 /* Fill out redirection table */
2894 for (i = 0, j = 0; i < 128; i++, j++) {
86b4db3b 2895 if (j == maxq)
05abb126
AD
2896 j = 0;
2897 /* reta = 4-byte sliding window of
2898 * 0x00..(indices-1)(indices-1)00..etc. */
2899 reta = (reta << 8) | (j * 0x11);
2900 if ((i & 3) == 3)
2901 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2902 }
0cefafad 2903
05abb126
AD
2904 /* Disable indicating checksum in descriptor, enables RSS hash */
2905 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2906 rxcsum |= IXGBE_RXCSUM_PCSD;
2907 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2908
8b1c0b24
JF
2909 if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
2910 (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
0cefafad 2911 mrqc = IXGBE_MRQC_RSSEN;
8b1c0b24
JF
2912 } else {
2913 int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
2914 | IXGBE_FLAG_SRIOV_ENABLED);
2915
2916 switch (mask) {
2917 case (IXGBE_FLAG_RSS_ENABLED):
2918 if (!tcs)
2919 mrqc = IXGBE_MRQC_RSSEN;
2920 else if (tcs <= 4)
2921 mrqc = IXGBE_MRQC_RTRSS4TCEN;
2922 else
2923 mrqc = IXGBE_MRQC_RTRSS8TCEN;
2924 break;
2925 case (IXGBE_FLAG_SRIOV_ENABLED):
2926 mrqc = IXGBE_MRQC_VMDQEN;
2927 break;
2928 default:
2929 break;
2930 }
0cefafad
JB
2931 }
2932
05abb126
AD
2933 /* Perform hash on these packet types */
2934 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2935 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2936 | IXGBE_MRQC_RSS_FIELD_IPV6
2937 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2938
2939 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
0cefafad
JB
2940}
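
/*
 * Illustrative user-space sketch, not part of the driver: the redirection
 * table fill above packs four queue indices per 32-bit RETA register and
 * duplicates each index into both nibbles of its byte (j * 0x11). For
 * four RSS queues the first register comes out 0x00112233:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reta = 0;
	int i, j;
	const int maxq = 4;

	for (i = 0, j = 0; i < 8; i++, j++) {
		if (j == maxq)
			j = 0;
		reta = (reta << 8) | (j * 0x11);
		if ((i & 3) == 3)
			printf("RETA(%d) = 0x%08x\n", i >> 2, reta);
	}
	return 0;
}
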
2941
b93a2226
DS
2942/**
2943 * ixgbe_clear_rscctl - disable RSC for the indicated ring
2944 * @adapter: address of board private structure
2945 * @ring: structure containing ring specific data
2946 **/
2947void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
2948 struct ixgbe_ring *ring)
2949{
2950 struct ixgbe_hw *hw = &adapter->hw;
2951 u32 rscctrl;
2952 u8 reg_idx = ring->reg_idx;
2953
2954 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2955 rscctrl &= ~IXGBE_RSCCTL_RSCEN;
2956 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2957}
2958
bb5a9ad2
NS
2959/**
2960 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2961 * @adapter: address of board private structure
2962 * @index: index of ring to set
bb5a9ad2 2963 **/
b93a2226 2964void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
7367096a 2965 struct ixgbe_ring *ring)
bb5a9ad2 2966{
bb5a9ad2 2967 struct ixgbe_hw *hw = &adapter->hw;
bb5a9ad2 2968 u32 rscctrl;
edd2ea55 2969 int rx_buf_len;
bf29ee6c 2970 u8 reg_idx = ring->reg_idx;
7367096a 2971
7d637bcc 2972 if (!ring_is_rsc_enabled(ring))
7367096a 2973 return;
bb5a9ad2 2974
7367096a
AD
2975 rx_buf_len = ring->rx_buf_len;
2976 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
bb5a9ad2
NS
2977 rscctrl |= IXGBE_RSCCTL_RSCEN;
2978 /*
2979 * we must limit the number of descriptors so that the
2980 * total size of max desc * buf_len is not greater
2981 * than 65535
2982 */
7d637bcc 2983 if (ring_is_ps_enabled(ring)) {
bb5a9ad2
NS
2984#if (MAX_SKB_FRAGS > 16)
2985 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2986#elif (MAX_SKB_FRAGS > 8)
2987 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2988#elif (MAX_SKB_FRAGS > 4)
2989 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2990#else
2991 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2992#endif
2993 } else {
2994 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2995 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2996 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2997 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2998 else
2999 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
3000 }
7367096a 3001 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
bb5a9ad2
NS
3002}
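
/*
 * Illustrative user-space sketch, not part of the driver: the MAXDESC
 * choice above keeps max-descriptors * rx_buf_len under the 64KB RSC
 * limit. Picking the largest power-of-two count that fits a 2KB buffer:
 */
#include <stdio.h>

int main(void)
{
	unsigned int rx_buf_len = 2048;
	unsigned int maxdesc;

	for (maxdesc = 16; maxdesc > 1; maxdesc /= 2)
		if (maxdesc * rx_buf_len <= 65535)
			break;
	/* 16 * 2048 = 32768 <= 65535, so MAXDESC_16 is safe */
	printf("buf_len %u -> MAXDESC_%u\n", rx_buf_len, maxdesc);
	return 0;
}
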
3003
9e10e045
AD
3004/**
3005 * ixgbe_set_uta - Set unicast filter table address
3006 * @adapter: board private structure
3007 *
3008 * The unicast table address is a register array of 32-bit registers.
3009 * The table is meant to be used in a way similar to how the MTA is used
3010 * however due to certain limitations in the hardware it is necessary to
3011 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
3012 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
3013 **/
3014static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
3015{
3016 struct ixgbe_hw *hw = &adapter->hw;
3017 int i;
3018
3019 /* The UTA table only exists on 82599 hardware and newer */
3020 if (hw->mac.type < ixgbe_mac_82599EB)
3021 return;
3022
3023 /* we only need to do this if VMDq is enabled */
3024 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3025 return;
3026
3027 for (i = 0; i < 128; i++)
3028 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
3029}
3030
3031#define IXGBE_MAX_RX_DESC_POLL 10
3032static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
3033 struct ixgbe_ring *ring)
3034{
3035 struct ixgbe_hw *hw = &adapter->hw;
9e10e045
AD
3036 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3037 u32 rxdctl;
bf29ee6c 3038 u8 reg_idx = ring->reg_idx;
9e10e045
AD
3039
3040 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
3041 if (hw->mac.type == ixgbe_mac_82598EB &&
3042 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3043 return;
3044
3045 do {
032b4325 3046 usleep_range(1000, 2000);
9e10e045
AD
3047 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3048 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
3049
3050 if (!wait_loop) {
3051 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
3052 "the polling period\n", reg_idx);
3053 }
3054}
3055
2d39d576
YZ
3056void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3057 struct ixgbe_ring *ring)
3058{
3059 struct ixgbe_hw *hw = &adapter->hw;
3060 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3061 u32 rxdctl;
3062 u8 reg_idx = ring->reg_idx;
3063
3064 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3065 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3066
3067 /* write value back with RXDCTL.ENABLE bit cleared */
3068 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3069
3070 if (hw->mac.type == ixgbe_mac_82598EB &&
3071 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3072 return;
3073
3074 /* the hardware may take up to 100us to really disable the rx queue */
3075 do {
3076 udelay(10);
3077 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3078 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3079
3080 if (!wait_loop) {
3081 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3082 "the polling period\n", reg_idx);
3083 }
3084}
3085
84418e3b
AD
3086void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3087 struct ixgbe_ring *ring)
acd37177
AD
3088{
3089 struct ixgbe_hw *hw = &adapter->hw;
3090 u64 rdba = ring->dma;
9e10e045 3091 u32 rxdctl;
bf29ee6c 3092 u8 reg_idx = ring->reg_idx;
acd37177 3093
9e10e045
AD
3094 /* disable queue to avoid issues while updating state */
3095 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2d39d576 3096 ixgbe_disable_rx_queue(adapter, ring);
9e10e045 3097
acd37177
AD
3098 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
3099 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
3100 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
3101 ring->count * sizeof(union ixgbe_adv_rx_desc));
3102 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
3103 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
84ea2591 3104 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
9e10e045
AD
3105
3106 ixgbe_configure_srrctl(adapter, ring);
3107 ixgbe_configure_rscctl(adapter, ring);
3108
e9f98072
GR
3109 /* If operating in IOV mode set RLPML for X540 */
3110 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3111 hw->mac.type == ixgbe_mac_X540) {
3112 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
3113 rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
3114 ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
3115 }
3116
9e10e045
AD
3117 if (hw->mac.type == ixgbe_mac_82598EB) {
3118 /*
3119 * enable cache line friendly hardware writes:
3120 * PTHRESH=32 descriptors (half the internal cache),
3121 * this also removes ugly rx_no_buffer_count increment
3122 * HTHRESH=4 descriptors (to minimize latency on fetch)
3123 * WTHRESH=8 burst writeback up to two cache lines
3124 */
3125 rxdctl &= ~0x3FFFFF;
3126 rxdctl |= 0x080420;
3127 }
3128
3129 /* enable receive descriptor ring */
3130 rxdctl |= IXGBE_RXDCTL_ENABLE;
3131 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3132
3133 ixgbe_rx_desc_queue_enable(adapter, ring);
fc77dc3c 3134 ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
acd37177
AD
3135}
3136
48654521
AD
3137static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
3138{
3139 struct ixgbe_hw *hw = &adapter->hw;
3140 int p;
3141
3142 /* PSRTYPE must be initialized in non-82598 adapters */
3143 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
e8e9f696
JP
3144 IXGBE_PSRTYPE_UDPHDR |
3145 IXGBE_PSRTYPE_IPV4HDR |
48654521 3146 IXGBE_PSRTYPE_L2HDR |
e8e9f696 3147 IXGBE_PSRTYPE_IPV6HDR;
48654521
AD
3148
3149 if (hw->mac.type == ixgbe_mac_82598EB)
3150 return;
3151
3152 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
3153 psrtype |= (adapter->num_rx_queues_per_pool << 29);
3154
3155 for (p = 0; p < adapter->num_rx_pools; p++)
3156 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
3157 psrtype);
3158}
3159
f5b4a52e
AD
3160static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3161{
3162 struct ixgbe_hw *hw = &adapter->hw;
3163 u32 gcr_ext;
3164 u32 vt_reg_bits;
3165 u32 reg_offset, vf_shift;
3166 u32 vmdctl;
3167
3168 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
3169 return;
3170
3171 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3172 vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
3173 vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
3174 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
3175
3176 vf_shift = adapter->num_vfs % 32;
3177 reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
3178
3179 /* Enable only the PF's pool for Tx/Rx */
3180 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
3181 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
3182 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
3183 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
3184 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3185
3186 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3187 hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
3188
3189 /*
3190 * Set up VF register offsets for selected VT Mode,
3191 * i.e. 32 or 64 VFs for SR-IOV
3192 */
3193 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
3194 gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
3195 gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
3196 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3197
3198 /* enable Tx loopback for VF/PF communication */
3199 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
a985b6c3 3200 /* Enable MAC Anti-Spoofing */
a1cbb15c
GR
3201 hw->mac.ops.set_mac_anti_spoofing(hw,
3202 (adapter->antispoofing_enabled =
3203 (adapter->num_vfs != 0)),
a985b6c3 3204 adapter->num_vfs);
f5b4a52e
AD
3205}
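
/*
 * Illustrative user-space sketch, not part of the driver: the PF pool's
 * enable bit lands in one of two 32-bit VFRE/VFTE registers depending on
 * how many VF pools precede it. With 40 VFs the PF ends up at bit 8 of
 * register 1:
 */
#include <stdio.h>

int main(void)
{
	int num_vfs = 40;
	int vf_shift = num_vfs % 32;		 /* bit within register */
	int reg_offset = (num_vfs > 32) ? 1 : 0; /* which register */

	printf("VFRE(%d) |= 1 << %d\n", reg_offset, vf_shift);
	return 0;
}
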
3206
477de6ed 3207static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
9a799d71 3208{
9a799d71
AK
3209 struct ixgbe_hw *hw = &adapter->hw;
3210 struct net_device *netdev = adapter->netdev;
3211 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
7c6e0a43 3212 int rx_buf_len;
477de6ed
AD
3213 struct ixgbe_ring *rx_ring;
3214 int i;
3215 u32 mhadd, hlreg0;
48654521 3216
9a799d71 3217 /* Decide whether to use packet split mode or not */
a124339a
DS
3218 /* On by default */
3219 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
3220
1cdd1ec8 3221 /* Do not use packet split if we're in SR-IOV Mode */
a124339a
DS
3222 if (adapter->num_vfs)
3223 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3224
3225 /* Disable packet split due to 82599 erratum #45 */
3226 if (hw->mac.type == ixgbe_mac_82599EB)
3227 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
9a799d71
AK
3228
3229 /* Set the RX buffer length according to the mode */
3230 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
7c6e0a43 3231 rx_buf_len = IXGBE_RX_HDR_SIZE;
9a799d71 3232 } else {
0c19d6af 3233 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
f8212f97 3234 (netdev->mtu <= ETH_DATA_LEN))
7c6e0a43 3235 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
9a799d71 3236 else
477de6ed 3237 rx_buf_len = ALIGN(max_frame + VLAN_HLEN, 1024);
9a799d71
AK
3238 }
3239
63f39bd1 3240#ifdef IXGBE_FCOE
477de6ed
AD
3241 /* adjust max frame to be able to do baby jumbo for FCoE */
3242 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
3243 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
3244 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
9a799d71 3245
477de6ed
AD
3246#endif /* IXGBE_FCOE */
3247 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3248 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3249 mhadd &= ~IXGBE_MHADD_MFS_MASK;
3250 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
3251
3252 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3253 }
3254
3255 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3256 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
3257 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
3258 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
9a799d71 3259
0cefafad
JB
3260 /*
3261 * Setup the HW Rx Head and Tail Descriptor Pointers and
3262 * the Base and Length of the Rx Descriptor Ring
3263 */
9a799d71 3264 for (i = 0; i < adapter->num_rx_queues; i++) {
4a0b9ca0 3265 rx_ring = adapter->rx_ring[i];
a6616b42 3266 rx_ring->rx_buf_len = rx_buf_len;
cc41ac7c 3267
6e455b89 3268 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
7d637bcc
AD
3269 set_ring_ps_enabled(rx_ring);
3270 else
3271 clear_ring_ps_enabled(rx_ring);
3272
3273 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3274 set_ring_rsc_enabled(rx_ring);
1b3ff02e 3275 else
7d637bcc 3276 clear_ring_rsc_enabled(rx_ring);
cc41ac7c 3277
63f39bd1 3278#ifdef IXGBE_FCOE
e8e9f696 3279 if (netdev->features & NETIF_F_FCOE_MTU) {
63f39bd1
YZ
3280 struct ixgbe_ring_feature *f;
3281 f = &adapter->ring_feature[RING_F_FCOE];
6e455b89 3282 if ((i >= f->mask) && (i < f->mask + f->indices)) {
7d637bcc 3283 clear_ring_ps_enabled(rx_ring);
6e455b89
YZ
3284 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
3285 rx_ring->rx_buf_len =
e8e9f696 3286 IXGBE_FCOE_JUMBO_FRAME_SIZE;
7d637bcc
AD
3287 } else if (!ring_is_rsc_enabled(rx_ring) &&
3288 !ring_is_ps_enabled(rx_ring)) {
3289 rx_ring->rx_buf_len =
3290 IXGBE_FCOE_JUMBO_FRAME_SIZE;
6e455b89 3291 }
63f39bd1 3292 }
63f39bd1 3293#endif /* IXGBE_FCOE */
477de6ed 3294 }
477de6ed
AD
3295}
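/*
 * Worked example for the sizing above (a sketch assuming the standard
 * ETH_HLEN = 14, ETH_FCS_LEN = 4 and VLAN_HLEN = 4, with packet split
 * disabled as on 82599): for a 9000-byte MTU,
 *
 *   max_frame  = 9000 + 14 + 4         = 9018
 *   rx_buf_len = ALIGN(9018 + 4, 1024) = 9216
 *
 * so each Rx buffer rounds up to the next 1 KB boundary above the
 * largest VLAN-tagged frame.
 */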
3296
7367096a
AD
3297static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
3298{
3299 struct ixgbe_hw *hw = &adapter->hw;
3300 u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
3301
3302 switch (hw->mac.type) {
3303 case ixgbe_mac_82598EB:
3304 /*
3305 * For VMDq support of different descriptor types or
3306 * buffer sizes through the use of multiple SRRCTL
3307 * registers, RDRXCTL.MVMEN must be set to 1
3308 *
3309 * also, the manual doesn't mention it clearly but DCA hints
3310 * will only use queue 0's tags unless this bit is set. Side
3311 * effects of setting this bit are only that SRRCTL must be
3312 * fully programmed [0..15]
3313 */
3314 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
3315 break;
3316 case ixgbe_mac_82599EB:
b93a2226 3317 case ixgbe_mac_X540:
7367096a
AD
3318 /* Disable RSC for ACK packets */
3319 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3320 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
3321 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
3322 /* hardware requires some bits to be set by default */
3323 rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
3324 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
3325 break;
3326 default:
3327 /* We should do nothing since we don't know this hardware */
3328 return;
3329 }
3330
3331 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
3332}
3333
477de6ed
AD
3334/**
3335 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
3336 * @adapter: board private structure
3337 *
3338 * Configure the Rx unit of the MAC after a reset.
3339 **/
3340static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3341{
3342 struct ixgbe_hw *hw = &adapter->hw;
477de6ed
AD
3343 int i;
3344 u32 rxctrl;
477de6ed
AD
3345
3346 /* disable receives while setting up the descriptors */
3347 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3348 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3349
3350 ixgbe_setup_psrtype(adapter);
7367096a 3351 ixgbe_setup_rdrxctl(adapter);
477de6ed 3352
9e10e045 3353 /* Program registers for the distribution of queues */
f5b4a52e 3354 ixgbe_setup_mrqc(adapter);
f5b4a52e 3355
9e10e045
AD
3356 ixgbe_set_uta(adapter);
3357
477de6ed
AD
3358 /* set_rx_buffer_len must be called before ring initialization */
3359 ixgbe_set_rx_buffer_len(adapter);
3360
3361 /*
3362 * Setup the HW Rx Head and Tail Descriptor Pointers and
3363 * the Base and Length of the Rx Descriptor Ring
3364 */
9e10e045
AD
3365 for (i = 0; i < adapter->num_rx_queues; i++)
3366 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
177db6ff 3367
9e10e045
AD
3368 /* disable drop enable for 82598 parts */
3369 if (hw->mac.type == ixgbe_mac_82598EB)
3370 rxctrl |= IXGBE_RXCTRL_DMBYPS;
3371
3372 /* enable all receives */
3373 rxctrl |= IXGBE_RXCTRL_RXEN;
3374 hw->mac.ops.enable_rx_dma(hw, rxctrl);
9a799d71
AK
3375}
3376
068c89b0
DS
3377static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
3378{
3379 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3380 struct ixgbe_hw *hw = &adapter->hw;
1ada1b1b 3381 int pool_ndx = adapter->num_vfs;
068c89b0
DS
3382
3383 /* add VID to filter table */
1ada1b1b 3384 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
f62bbb5e 3385 set_bit(vid, adapter->active_vlans);
068c89b0
DS
3386}
3387
3388static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
3389{
3390 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3391 struct ixgbe_hw *hw = &adapter->hw;
1ada1b1b 3392 int pool_ndx = adapter->num_vfs;
068c89b0 3393
068c89b0 3394 /* remove VID from filter table */
1ada1b1b 3395 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
f62bbb5e 3396 clear_bit(vid, adapter->active_vlans);
068c89b0
DS
3397}
3398
5f6c0181
JB
3399/**
3400 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
3401 * @adapter: driver data
3402 */
3403static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
3404{
3405 struct ixgbe_hw *hw = &adapter->hw;
f62bbb5e
JG
3406 u32 vlnctrl;
3407
3408 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3409 vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3410 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3411}
3412
3413/**
3414 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
3415 * @adapter: driver data
3416 */
3417static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
3418{
3419 struct ixgbe_hw *hw = &adapter->hw;
3420 u32 vlnctrl;
3421
3422 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3423 vlnctrl |= IXGBE_VLNCTRL_VFE;
3424 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
3425 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3426}
3427
3428/**
3429 * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
3430 * @adapter: driver data
3431 */
3432static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3433{
3434 struct ixgbe_hw *hw = &adapter->hw;
3435 u32 vlnctrl;
5f6c0181
JB
3436 int i, j;
3437
3438 switch (hw->mac.type) {
3439 case ixgbe_mac_82598EB:
f62bbb5e
JG
3440 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3441 vlnctrl &= ~IXGBE_VLNCTRL_VME;
5f6c0181
JB
3442 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3443 break;
3444 case ixgbe_mac_82599EB:
b93a2226 3445 case ixgbe_mac_X540:
5f6c0181
JB
3446 for (i = 0; i < adapter->num_rx_queues; i++) {
3447 j = adapter->rx_ring[i]->reg_idx;
3448 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3449 vlnctrl &= ~IXGBE_RXDCTL_VME;
3450 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3451 }
3452 break;
3453 default:
3454 break;
3455 }
3456}
3457
3458/**
f62bbb5e 3459 * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
5f6c0181
JB
3460 * @adapter: driver data
3461 */
f62bbb5e 3462static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
5f6c0181
JB
3463{
3464 struct ixgbe_hw *hw = &adapter->hw;
f62bbb5e 3465 u32 vlnctrl;
5f6c0181
JB
3466 int i, j;
3467
3468 switch (hw->mac.type) {
3469 case ixgbe_mac_82598EB:
f62bbb5e
JG
3470 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
3471 vlnctrl |= IXGBE_VLNCTRL_VME;
5f6c0181
JB
3472 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3473 break;
3474 case ixgbe_mac_82599EB:
b93a2226 3475 case ixgbe_mac_X540:
5f6c0181
JB
3476 for (i = 0; i < adapter->num_rx_queues; i++) {
3477 j = adapter->rx_ring[i]->reg_idx;
3478 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
3479 vlnctrl |= IXGBE_RXDCTL_VME;
3480 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
3481 }
3482 break;
3483 default:
3484 break;
3485 }
3486}
3487
9a799d71
AK
3488static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
3489{
f62bbb5e 3490 u16 vid;
9a799d71 3491
f62bbb5e
JG
3492 ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
3493
3494 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
3495 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
9a799d71
AK
3496}
3497
2850062a
AD
3498/**
3499 * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
3500 * @netdev: network interface device structure
3501 *
3502 * Writes unicast address list to the RAR table.
3503 * Returns: -ENOMEM on failure/insufficient address space
3504 * 0 on no addresses written
3505 * X on writing X addresses to the RAR table
3506 **/
3507static int ixgbe_write_uc_addr_list(struct net_device *netdev)
3508{
3509 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3510 struct ixgbe_hw *hw = &adapter->hw;
3511 unsigned int vfn = adapter->num_vfs;
a1cbb15c 3512 unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
2850062a
AD
3513 int count = 0;
3514
3515 /* return ENOMEM indicating insufficient memory for addresses */
3516 if (netdev_uc_count(netdev) > rar_entries)
3517 return -ENOMEM;
3518
3519 if (!netdev_uc_empty(netdev) && rar_entries) {
3520 struct netdev_hw_addr *ha;
3521 /* return error if we do not support writing to RAR table */
3522 if (!hw->mac.ops.set_rar)
3523 return -ENOMEM;
3524
3525 netdev_for_each_uc_addr(ha, netdev) {
3526 if (!rar_entries)
3527 break;
3528 hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
3529 vfn, IXGBE_RAH_AV);
3530 count++;
3531 }
3532 }
3533 /* write the addresses in reverse order to avoid write combining */
3534 for (; rar_entries > 0 ; rar_entries--)
3535 hw->mac.ops.clear_rar(hw, rar_entries);
3536
3537 return count;
3538}
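/*
 * A minimal sketch of the fill order above, assuming for illustration
 * that rar_entries starts at 16 and the unicast list holds three
 * addresses: set_rar() writes entries 16, 15 and 14, and the clearing
 * loop then wipes entries 13 down to 1 so no stale filters survive.
 * Entry 0 is left alone; it carries the PF MAC programmed elsewhere.
 */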
3539
9a799d71 3540/**
2c5645cf 3541 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
9a799d71
AK
3542 * @netdev: network interface device structure
3543 *
2c5645cf
CL
3544 * The set_rx_mode entry point is called whenever the unicast/multicast
3545 * address list or the network interface flags are updated. This routine is
3546 * responsible for configuring the hardware for proper unicast, multicast and
3547 * promiscuous mode.
9a799d71 3548 **/
7f870475 3549void ixgbe_set_rx_mode(struct net_device *netdev)
9a799d71
AK
3550{
3551 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3552 struct ixgbe_hw *hw = &adapter->hw;
2850062a
AD
3553 u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
3554 int count;
9a799d71
AK
3555
3556 /* Check for Promiscuous and All Multicast modes */
3557
3558 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3559
f5dc442b
AD
3560 /* set all bits that we expect to always be set */
3561 fctrl |= IXGBE_FCTRL_BAM;
3562 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
3563 fctrl |= IXGBE_FCTRL_PMCF;
3564
2850062a
AD
3565 /* clear the bits we are changing the status of */
3566 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
3567
9a799d71 3568 if (netdev->flags & IFF_PROMISC) {
e433ea1f 3569 hw->addr_ctrl.user_set_promisc = true;
9a799d71 3570 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2850062a 3571 vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
5f6c0181
JB
3572 /* don't filter VLANs in hardware while in promiscuous mode */
3573 ixgbe_vlan_filter_disable(adapter);
9a799d71 3574 } else {
746b9f02
PM
3575 if (netdev->flags & IFF_ALLMULTI) {
3576 fctrl |= IXGBE_FCTRL_MPE;
2850062a
AD
3577 vmolr |= IXGBE_VMOLR_MPE;
3578 } else {
3579 /*
3580 * Write addresses to the MTA, if the attempt fails
25985edc 3581 * then we should just turn on promiscuous mode so
2850062a
AD
3582 * that we can at least receive multicast traffic
3583 */
3584 hw->mac.ops.update_mc_addr_list(hw, netdev);
3585 vmolr |= IXGBE_VMOLR_ROMPE;
746b9f02 3586 }
5f6c0181 3587 ixgbe_vlan_filter_enable(adapter);
e433ea1f 3588 hw->addr_ctrl.user_set_promisc = false;
2850062a
AD
3589 /*
3590 * Write addresses to available RAR registers, if there is not
3591 * sufficient space to store all the addresses then enable
25985edc 3592 * unicast promiscuous mode
2850062a
AD
3593 */
3594 count = ixgbe_write_uc_addr_list(netdev);
3595 if (count < 0) {
3596 fctrl |= IXGBE_FCTRL_UPE;
3597 vmolr |= IXGBE_VMOLR_ROPE;
3598 }
9a799d71
AK
3599 }
3600
2850062a 3601 if (adapter->num_vfs) {
1cdd1ec8 3602 ixgbe_restore_vf_multicasts(adapter);
2850062a
AD
3603 vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
3604 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
3605 IXGBE_VMOLR_ROPE);
3606 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
3607 }
3608
3609 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
f62bbb5e
JG
3610
3611 if (netdev->features & NETIF_F_HW_VLAN_RX)
3612 ixgbe_vlan_strip_enable(adapter);
3613 else
3614 ixgbe_vlan_strip_disable(adapter);
9a799d71
AK
3615}
3616
021230d4
AV
3617static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
3618{
3619 int q_idx;
3620 struct ixgbe_q_vector *q_vector;
3621 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3622
3623 /* legacy and MSI only use one vector */
3624 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3625 q_vectors = 1;
3626
3627 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
f0848276 3628 struct napi_struct *napi;
7a921c93 3629 q_vector = adapter->q_vector[q_idx];
f0848276 3630 napi = &q_vector->napi;
91281fd3
AD
3631 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3632 if (!q_vector->rxr_count || !q_vector->txr_count) {
3633 if (q_vector->txr_count == 1)
3634 napi->poll = &ixgbe_clean_txonly;
3635 else if (q_vector->rxr_count == 1)
3636 napi->poll = &ixgbe_clean_rxonly;
3637 }
3638 }
f0848276
JB
3639
3640 napi_enable(napi);
021230d4
AV
3641 }
3642}
3643
3644static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
3645{
3646 int q_idx;
3647 struct ixgbe_q_vector *q_vector;
3648 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3649
3650 /* legacy and MSI only use one vector */
3651 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3652 q_vectors = 1;
3653
3654 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
7a921c93 3655 q_vector = adapter->q_vector[q_idx];
021230d4
AV
3656 napi_disable(&q_vector->napi);
3657 }
3658}
3659
7a6b6f51 3660#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
3661/*
3662 * ixgbe_configure_dcb - Configure DCB hardware
3663 * @adapter: ixgbe adapter struct
3664 *
3665 * This is called by the driver on open to configure the DCB hardware.
3666 * This is also called by the gennetlink interface when reconfiguring
3667 * the DCB state.
3668 */
3669static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3670{
3671 struct ixgbe_hw *hw = &adapter->hw;
9806307a 3672 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2f90b865 3673
67ebd791
AD
3674 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3675 if (hw->mac.type == ixgbe_mac_82598EB)
3676 netif_set_gso_max_size(adapter->netdev, 65536);
3677 return;
3678 }
3679
3680 if (hw->mac.type == ixgbe_mac_82598EB)
3681 netif_set_gso_max_size(adapter->netdev, 32768);
3682
2f90b865 3683
2f90b865 3684 /* Enable VLAN tag insert/strip */
f62bbb5e 3685 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
5f6c0181 3686
2f90b865 3687 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
01fa7d90
AD
3688
3689 /* reconfigure the hardware */
6f70f6ac 3690 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
c27931da
JF
3691#ifdef CONFIG_FCOE
3692 if (adapter->netdev->features & NETIF_F_FCOE_MTU)
3693 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3694#endif
3695 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3696 DCB_TX_CONFIG);
3697 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3698 DCB_RX_CONFIG);
3699 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3700 } else {
3701 struct net_device *dev = adapter->netdev;
3702
3703 if (adapter->ixgbe_ieee_ets)
3704 dev->dcbnl_ops->ieee_setets(dev,
3705 adapter->ixgbe_ieee_ets);
3706 if (adapter->ixgbe_ieee_pfc)
3707 dev->dcbnl_ops->ieee_setpfc(dev,
3708 adapter->ixgbe_ieee_pfc);
3709 }
8187cd48
JF
3710
3711 /* Enable RSS Hash per TC */
3712 if (hw->mac.type != ixgbe_mac_82598EB) {
3713 int i;
3714 u32 reg = 0;
3715
3716 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
3717 u8 msb = 0;
3718 u8 cnt = adapter->netdev->tc_to_txq[i].count;
3719
3720 while (cnt >>= 1)
3721 msb++;
3722
3723 reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
3724 }
3725 IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
3726 }
2f90b865
AD
3727}
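/*
 * The msb loop above computes floor(log2(cnt)), which is how the
 * per-TC queue count is encoded in its RQTC field. For cnt = 8:
 *
 *   8 >> 1 = 4   (msb = 1)
 *   4 >> 1 = 2   (msb = 2)
 *   2 >> 1 = 1   (msb = 3)
 *   1 >> 1 = 0   (loop exits)
 *
 * so a TC owning 8 Tx queues is written as 3 into IXGBE_RQTC.
 */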
3728
3729#endif
80605c65
JF
3730
3731static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
3732{
3733 int hdrm = 0;
3734 int num_tc = netdev_get_num_tc(adapter->netdev);
3735 struct ixgbe_hw *hw = &adapter->hw;
3736
3737 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3738 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3739 hdrm = 64 << adapter->fdir_pballoc;
3740
3741 hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL);
3742}
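/*
 * Worked example: with fdir_pballoc = 2, hdrm = 64 << 2 = 256 is
 * reserved as Flow Director headroom, and set_rxpba() is then
 * expected (given PBA_STRATEGY_EQUAL) to split the remaining packet
 * buffer evenly across the num_tc traffic classes.
 */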
3743
e4911d57
AD
3744static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
3745{
3746 struct ixgbe_hw *hw = &adapter->hw;
3747 struct hlist_node *node, *node2;
3748 struct ixgbe_fdir_filter *filter;
3749
3750 spin_lock(&adapter->fdir_perfect_lock);
3751
3752 if (!hlist_empty(&adapter->fdir_filter_list))
3753 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
3754
3755 hlist_for_each_entry_safe(filter, node, node2,
3756 &adapter->fdir_filter_list, fdir_node) {
3757 ixgbe_fdir_write_perfect_filter_82599(hw,
1f4d5183
AD
3758 &filter->filter,
3759 filter->sw_idx,
3760 (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
3761 IXGBE_FDIR_DROP_QUEUE :
3762 adapter->rx_ring[filter->action]->reg_idx);
e4911d57
AD
3763 }
3764
3765 spin_unlock(&adapter->fdir_perfect_lock);
3766}
3767
9a799d71
AK
3768static void ixgbe_configure(struct ixgbe_adapter *adapter)
3769{
3770 struct net_device *netdev = adapter->netdev;
c4cf55e5 3771 struct ixgbe_hw *hw = &adapter->hw;
9a799d71
AK
3772 int i;
3773
80605c65 3774 ixgbe_configure_pb(adapter);
7a6b6f51 3775#ifdef CONFIG_IXGBE_DCB
67ebd791 3776 ixgbe_configure_dcb(adapter);
2f90b865 3777#endif
9a799d71 3778
f62bbb5e
JG
3779 ixgbe_set_rx_mode(netdev);
3780 ixgbe_restore_vlan(adapter);
3781
eacd73f7
YZ
3782#ifdef IXGBE_FCOE
3783 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
3784 ixgbe_configure_fcoe(adapter);
3785
3786#endif /* IXGBE_FCOE */
c4cf55e5
PWJ
3787 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
3788 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 3789 adapter->tx_ring[i]->atr_sample_rate =
e8e9f696 3790 adapter->atr_sample_rate;
c4cf55e5 3791 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
e4911d57
AD
3792 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
3793 ixgbe_init_fdir_perfect_82599(&adapter->hw,
3794 adapter->fdir_pballoc);
3795 ixgbe_fdir_filter_restore(adapter);
c4cf55e5 3796 }
933d41f1 3797 ixgbe_configure_virtualization(adapter);
c4cf55e5 3798
9a799d71
AK
3799 ixgbe_configure_tx(adapter);
3800 ixgbe_configure_rx(adapter);
9a799d71
AK
3801}
3802
e8e26350
PW
3803static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
3804{
3805 switch (hw->phy.type) {
3806 case ixgbe_phy_sfp_avago:
3807 case ixgbe_phy_sfp_ftl:
3808 case ixgbe_phy_sfp_intel:
3809 case ixgbe_phy_sfp_unknown:
ea0a04df
DS
3810 case ixgbe_phy_sfp_passive_tyco:
3811 case ixgbe_phy_sfp_passive_unknown:
3812 case ixgbe_phy_sfp_active_unknown:
3813 case ixgbe_phy_sfp_ftl_active:
e8e26350
PW
3814 return true;
3815 default:
3816 return false;
3817 }
3818}
3819
0ecc061d 3820/**
e8e26350
PW
3821 * ixgbe_sfp_link_config - set up SFP+ link
3822 * @adapter: pointer to private adapter struct
3823 **/
3824static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3825{
7086400d
AD
3826 /*
3827 * We are assuming the worst case scenerio here, and that
3828 * is that an SFP was inserted/removed after the reset
3829 * but before SFP detection was enabled. As such the best
3830 * solution is to just start searching as soon as we start
3831 */
3832 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
3833 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
e8e26350 3834
7086400d 3835 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
e8e26350
PW
3836}
3837
3838/**
3839 * ixgbe_non_sfp_link_config - set up non-SFP+ link
0ecc061d
PWJ
3840 * @hw: pointer to private hardware struct
3841 *
3842 * Returns 0 on success, negative on failure
3843 **/
e8e26350 3844static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
0ecc061d
PWJ
3845{
3846 u32 autoneg;
8620a103 3847 bool negotiation, link_up = false;
0ecc061d
PWJ
3848 u32 ret = IXGBE_ERR_LINK_SETUP;
3849
3850 if (hw->mac.ops.check_link)
3851 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
3852
3853 if (ret)
3854 goto link_cfg_out;
3855
0b0c2b31
ET
3856 autoneg = hw->phy.autoneg_advertised;
3857 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
e8e9f696
JP
3858 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
3859 &negotiation);
0ecc061d
PWJ
3860 if (ret)
3861 goto link_cfg_out;
3862
8620a103
MC
3863 if (hw->mac.ops.setup_link)
3864 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
0ecc061d
PWJ
3865link_cfg_out:
3866 return ret;
3867}
3868
a34bcfff 3869static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
9a799d71 3870{
9a799d71 3871 struct ixgbe_hw *hw = &adapter->hw;
a34bcfff 3872 u32 gpie = 0;
9a799d71 3873
9b471446 3874 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
a34bcfff
AD
3875 gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
3876 IXGBE_GPIE_OCD;
3877 gpie |= IXGBE_GPIE_EIAME;
9b471446
JB
3878 /*
3879 * use EIAM to auto-mask when MSI-X interrupt is asserted
3880 * this saves a register write for every interrupt
3881 */
3882 switch (hw->mac.type) {
3883 case ixgbe_mac_82598EB:
3884 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3885 break;
9b471446 3886 case ixgbe_mac_82599EB:
b93a2226
DS
3887 case ixgbe_mac_X540:
3888 default:
9b471446
JB
3889 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3890 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3891 break;
3892 }
3893 } else {
021230d4
AV
3894 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
3895 * specifically only auto mask tx and rx interrupts */
3896 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3897 }
9a799d71 3898
a34bcfff
AD
3899 /* XXX: to interrupt immediately for EICS writes, enable this */
3900 /* gpie |= IXGBE_GPIE_EIMEN; */
3901
3902 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
3903 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
3904 gpie |= IXGBE_GPIE_VTMODE_64;
119fc60a
MC
3905 }
3906
a34bcfff
AD
3907 /* Enable fan failure interrupt */
3908 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
0befdb3e 3909 gpie |= IXGBE_SDP1_GPIEN;
0befdb3e 3910
2698b208 3911 if (hw->mac.type == ixgbe_mac_82599EB) {
e8e26350
PW
3912 gpie |= IXGBE_SDP1_GPIEN;
3913 gpie |= IXGBE_SDP2_GPIEN;
2698b208 3914 }
a34bcfff
AD
3915
3916 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3917}
3918
3919static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3920{
3921 struct ixgbe_hw *hw = &adapter->hw;
a34bcfff 3922 int err;
a34bcfff
AD
3923 u32 ctrl_ext;
3924
3925 ixgbe_get_hw_control(adapter);
3926 ixgbe_setup_gpie(adapter);
e8e26350 3927
9a799d71
AK
3928 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3929 ixgbe_configure_msix(adapter);
3930 else
3931 ixgbe_configure_msi_and_legacy(adapter);
3932
c6ecf39a
DS
3933 /* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */
3934 if (hw->mac.ops.enable_tx_laser &&
3935 ((hw->phy.multispeed_fiber) ||
9f911707 3936 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
c6ecf39a 3937 (hw->mac.type == ixgbe_mac_82599EB))))
61fac744
PW
3938 hw->mac.ops.enable_tx_laser(hw);
3939
9a799d71 3940 clear_bit(__IXGBE_DOWN, &adapter->state);
021230d4
AV
3941 ixgbe_napi_enable_all(adapter);
3942
73c4b7cd
AD
3943 if (ixgbe_is_sfp(hw)) {
3944 ixgbe_sfp_link_config(adapter);
3945 } else {
3946 err = ixgbe_non_sfp_link_config(hw);
3947 if (err)
3948 e_err(probe, "link_config FAILED %d\n", err);
3949 }
3950
021230d4
AV
3951 /* clear any pending interrupts, may auto mask */
3952 IXGBE_READ_REG(hw, IXGBE_EICR);
6af3b9eb 3953 ixgbe_irq_enable(adapter, true, true);
9a799d71 3954
bf069c97
DS
3955 /*
3956 * If this adapter has a fan, check to see if we had a failure
3957 * before we enabled the interrupt.
3958 */
3959 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
3960 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3961 if (esdp & IXGBE_ESDP_SDP1)
396e799c 3962 e_crit(drv, "Fan has stopped, replace the adapter\n");
bf069c97
DS
3963 }
3964
1da100bb 3965 /* enable transmits */
477de6ed 3966 netif_tx_start_all_queues(adapter->netdev);
1da100bb 3967
9a799d71
AK
3968 /* bring the link up in the watchdog; this could race with our first
3969 * link up interrupt but shouldn't be a problem */
cf8280ee
JB
3970 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
3971 adapter->link_check_timeout = jiffies;
7086400d 3972 mod_timer(&adapter->service_timer, jiffies);
c9205697
GR
3973
3974 /* Set PF Reset Done bit so PF/VF Mail Ops can work */
3975 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
3976 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
3977 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
3978
9a799d71
AK
3979 return 0;
3980}
3981
d4f80882
AV
3982void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
3983{
3984 WARN_ON(in_interrupt());
7086400d
AD
3985 /* put off any impending NetWatchDogTimeout */
3986 adapter->netdev->trans_start = jiffies;
3987
d4f80882 3988 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
032b4325 3989 usleep_range(1000, 2000);
d4f80882 3990 ixgbe_down(adapter);
5809a1ae
GR
3991 /*
3992 * If SR-IOV enabled then wait a bit before bringing the adapter
3993 * back up to give the VFs time to respond to the reset. The
3994 * two second wait is based upon the watchdog timer cycle in
3995 * the VF driver.
3996 */
3997 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
3998 msleep(2000);
d4f80882
AV
3999 ixgbe_up(adapter);
4000 clear_bit(__IXGBE_RESETTING, &adapter->state);
4001}
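/*
 * The test_and_set_bit() loop above is a simple sleeping lock built
 * from one atomic bit: only a single caller can hold
 * __IXGBE_RESETTING at a time, and the 1-2 ms usleep_range() keeps
 * the loser of the race from spinning on the CPU. ixgbe_reset()
 * below guards __IXGBE_IN_SFP_INIT with the same idiom.
 */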
4002
9a799d71
AK
4003int ixgbe_up(struct ixgbe_adapter *adapter)
4004{
4005 /* hardware has been reset, we need to reload some things */
4006 ixgbe_configure(adapter);
4007
4008 return ixgbe_up_complete(adapter);
4009}
4010
4011void ixgbe_reset(struct ixgbe_adapter *adapter)
4012{
c44ade9e 4013 struct ixgbe_hw *hw = &adapter->hw;
8ca783ab
DS
4014 int err;
4015
7086400d
AD
4016 /* lock SFP init bit to prevent race conditions with the watchdog */
4017 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
4018 usleep_range(1000, 2000);
4019
4020 /* clear all SFP and link config related flags while holding SFP_INIT */
4021 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
4022 IXGBE_FLAG2_SFP_NEEDS_RESET);
4023 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
4024
8ca783ab 4025 err = hw->mac.ops.init_hw(hw);
da4dd0f7
PWJ
4026 switch (err) {
4027 case 0:
4028 case IXGBE_ERR_SFP_NOT_PRESENT:
7086400d 4029 case IXGBE_ERR_SFP_NOT_SUPPORTED:
da4dd0f7
PWJ
4030 break;
4031 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
849c4542 4032 e_dev_err("master disable timed out\n");
da4dd0f7 4033 break;
794caeb2
PWJ
4034 case IXGBE_ERR_EEPROM_VERSION:
4035 /* We are running on a pre-production device, log a warning */
849c4542
ET
4036 e_dev_warn("This device is a pre-production adapter/LOM. "
4037 "Please be aware there may be issuesassociated with "
4038 "your hardware. If you are experiencing problems "
4039 "please contact your Intel or hardware "
4040 "representative who provided you with this "
4041 "hardware.\n");
794caeb2 4042 break;
da4dd0f7 4043 default:
849c4542 4044 e_dev_err("Hardware Error: %d\n", err);
da4dd0f7 4045 }
9a799d71 4046
7086400d
AD
4047 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
4048
9a799d71 4049 /* reprogram the RAR[0] in case user changed it. */
1cdd1ec8
GR
4050 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
4051 IXGBE_RAH_AV);
9a799d71
AK
4052}
4053
9a799d71
AK
4054/**
4055 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
9a799d71
AK
4056 * @rx_ring: ring to free buffers from
4057 **/
b6ec895e 4058static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
9a799d71 4059{
b6ec895e 4060 struct device *dev = rx_ring->dev;
9a799d71 4061 unsigned long size;
b6ec895e 4062 u16 i;
9a799d71 4063
84418e3b
AD
4064 /* ring already cleared, nothing to do */
4065 if (!rx_ring->rx_buffer_info)
4066 return;
9a799d71 4067
84418e3b 4068 /* Free all the Rx ring sk_buffs */
9a799d71
AK
4069 for (i = 0; i < rx_ring->count; i++) {
4070 struct ixgbe_rx_buffer *rx_buffer_info;
4071
4072 rx_buffer_info = &rx_ring->rx_buffer_info[i];
4073 if (rx_buffer_info->dma) {
b6ec895e 4074 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
e8e9f696 4075 rx_ring->rx_buf_len,
1b507730 4076 DMA_FROM_DEVICE);
9a799d71
AK
4077 rx_buffer_info->dma = 0;
4078 }
4079 if (rx_buffer_info->skb) {
f8212f97 4080 struct sk_buff *skb = rx_buffer_info->skb;
9a799d71 4081 rx_buffer_info->skb = NULL;
f8212f97
AD
4082 do {
4083 struct sk_buff *this = skb;
e8171aaa 4084 if (IXGBE_RSC_CB(this)->delay_unmap) {
b6ec895e 4085 dma_unmap_single(dev,
1b507730 4086 IXGBE_RSC_CB(this)->dma,
e8e9f696 4087 rx_ring->rx_buf_len,
1b507730 4088 DMA_FROM_DEVICE);
fd3686a8 4089 IXGBE_RSC_CB(this)->dma = 0;
e8171aaa 4090 IXGBE_RSC_CB(skb)->delay_unmap = false;
fd3686a8 4091 }
f8212f97
AD
4092 skb = skb->prev;
4093 dev_kfree_skb(this);
4094 } while (skb);
9a799d71
AK
4095 }
4096 if (!rx_buffer_info->page)
4097 continue;
4f57ca6e 4098 if (rx_buffer_info->page_dma) {
b6ec895e 4099 dma_unmap_page(dev, rx_buffer_info->page_dma,
1b507730 4100 PAGE_SIZE / 2, DMA_FROM_DEVICE);
4f57ca6e
JB
4101 rx_buffer_info->page_dma = 0;
4102 }
9a799d71
AK
4103 put_page(rx_buffer_info->page);
4104 rx_buffer_info->page = NULL;
762f4c57 4105 rx_buffer_info->page_offset = 0;
9a799d71
AK
4106 }
4107
4108 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4109 memset(rx_ring->rx_buffer_info, 0, size);
4110
4111 /* Zero out the descriptor ring */
4112 memset(rx_ring->desc, 0, rx_ring->size);
4113
4114 rx_ring->next_to_clean = 0;
4115 rx_ring->next_to_use = 0;
9a799d71
AK
4116}
4117
4118/**
4119 * ixgbe_clean_tx_ring - Free Tx Buffers
9a799d71
AK
4120 * @tx_ring: ring to be cleaned
4121 **/
b6ec895e 4122static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
9a799d71
AK
4123{
4124 struct ixgbe_tx_buffer *tx_buffer_info;
4125 unsigned long size;
b6ec895e 4126 u16 i;
9a799d71 4127
84418e3b
AD
4128 /* ring already cleared, nothing to do */
4129 if (!tx_ring->tx_buffer_info)
4130 return;
9a799d71 4131
84418e3b 4132 /* Free all the Tx ring sk_buffs */
9a799d71
AK
4133 for (i = 0; i < tx_ring->count; i++) {
4134 tx_buffer_info = &tx_ring->tx_buffer_info[i];
b6ec895e 4135 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
9a799d71
AK
4136 }
4137
4138 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4139 memset(tx_ring->tx_buffer_info, 0, size);
4140
4141 /* Zero out the descriptor ring */
4142 memset(tx_ring->desc, 0, tx_ring->size);
4143
4144 tx_ring->next_to_use = 0;
4145 tx_ring->next_to_clean = 0;
9a799d71
AK
4146}
4147
4148/**
021230d4 4149 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
9a799d71
AK
4150 * @adapter: board private structure
4151 **/
021230d4 4152static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
9a799d71
AK
4153{
4154 int i;
4155
021230d4 4156 for (i = 0; i < adapter->num_rx_queues; i++)
b6ec895e 4157 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
9a799d71
AK
4158}
4159
4160/**
021230d4 4161 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
9a799d71
AK
4162 * @adapter: board private structure
4163 **/
021230d4 4164static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
9a799d71
AK
4165{
4166 int i;
4167
021230d4 4168 for (i = 0; i < adapter->num_tx_queues; i++)
b6ec895e 4169 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
9a799d71
AK
4170}
4171
e4911d57
AD
4172static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
4173{
4174 struct hlist_node *node, *node2;
4175 struct ixgbe_fdir_filter *filter;
4176
4177 spin_lock(&adapter->fdir_perfect_lock);
4178
4179 hlist_for_each_entry_safe(filter, node, node2,
4180 &adapter->fdir_filter_list, fdir_node) {
4181 hlist_del(&filter->fdir_node);
4182 kfree(filter);
4183 }
4184 adapter->fdir_filter_count = 0;
4185
4186 spin_unlock(&adapter->fdir_perfect_lock);
4187}
4188
9a799d71
AK
4189void ixgbe_down(struct ixgbe_adapter *adapter)
4190{
4191 struct net_device *netdev = adapter->netdev;
7f821875 4192 struct ixgbe_hw *hw = &adapter->hw;
9a799d71 4193 u32 rxctrl;
bf29ee6c 4194 int i;
b25ebfd2 4195 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
9a799d71
AK
4196
4197 /* signal that we are down to the interrupt handler */
4198 set_bit(__IXGBE_DOWN, &adapter->state);
4199
4200 /* disable receives */
7f821875
JB
4201 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4202 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
9a799d71 4203
2d39d576
YZ
4204 /* disable all enabled rx queues */
4205 for (i = 0; i < adapter->num_rx_queues; i++)
4206 /* this call also flushes the previous write */
4207 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
4208
032b4325 4209 usleep_range(10000, 20000);
9a799d71 4210
7f821875
JB
4211 netif_tx_stop_all_queues(netdev);
4212
7086400d 4213 /* call carrier off first to avoid false dev_watchdog timeouts */
c0dfb90e
JF
4214 netif_carrier_off(netdev);
4215 netif_tx_disable(netdev);
4216
4217 ixgbe_irq_disable(adapter);
4218
4219 ixgbe_napi_disable_all(adapter);
4220
d034acf1
AD
4221 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
4222 IXGBE_FLAG2_RESET_REQUESTED);
7086400d
AD
4223 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4224
4225 del_timer_sync(&adapter->service_timer);
4226
34cecbbf
AD
4227 /* disable receive for all VFs and wait one second */
4228 if (adapter->num_vfs) {
4229 /* ping all the active vfs to let them know we are going down */
4230 ixgbe_ping_all_vfs(adapter);
4231
4232 /* Disable all VFTE/VFRE TX/RX */
4233 ixgbe_disable_tx_rx(adapter);
4234
4235 /* Mark all the VFs as inactive */
4236 for (i = 0 ; i < adapter->num_vfs; i++)
4237 adapter->vfinfo[i].clear_to_send = 0;
4238 }
4239
b25ebfd2
PW
4240 /* Cleanup the affinity_hint CPU mask memory and callback */
4241 for (i = 0; i < num_q_vectors; i++) {
4242 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
4243 /* clear the affinity_mask in the IRQ descriptor */
4244 irq_set_affinity_hint(adapter->msix_entries[i].vector, NULL);
4245 /* release the CPU mask memory */
4246 free_cpumask_var(q_vector->affinity_mask);
4247 }
4248
7f821875
JB
4249 /* disable transmits in the hardware now that interrupts are off */
4250 for (i = 0; i < adapter->num_tx_queues; i++) {
bf29ee6c 4251 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
34cecbbf 4252 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
7f821875 4253 }
34cecbbf
AD
4254
4255 /* Disable the Tx DMA engine on 82599 and X540 */
bd508178
AD
4256 switch (hw->mac.type) {
4257 case ixgbe_mac_82599EB:
b93a2226 4258 case ixgbe_mac_X540:
88512539 4259 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
e8e9f696
JP
4260 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
4261 ~IXGBE_DMATXCTL_TE));
bd508178
AD
4262 break;
4263 default:
4264 break;
4265 }
7f821875 4266
6f4a0e45
PL
4267 if (!pci_channel_offline(adapter->pdev))
4268 ixgbe_reset(adapter);
c6ecf39a
DS
4269
4270 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
4271 if (hw->mac.ops.disable_tx_laser &&
4272 ((hw->phy.multispeed_fiber) ||
9f911707 4273 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
c6ecf39a
DS
4274 (hw->mac.type == ixgbe_mac_82599EB))))
4275 hw->mac.ops.disable_tx_laser(hw);
4276
9a799d71
AK
4277 ixgbe_clean_all_tx_rings(adapter);
4278 ixgbe_clean_all_rx_rings(adapter);
4279
5dd2d332 4280#ifdef CONFIG_IXGBE_DCA
96b0e0f6 4281 /* since we reset the hardware DCA settings were cleared */
e35ec126 4282 ixgbe_setup_dca(adapter);
96b0e0f6 4283#endif
9a799d71
AK
4284}
4285
9a799d71 4286/**
021230d4
AV
4287 * ixgbe_poll - NAPI Rx polling callback
4288 * @napi: structure for representing this polling device
4289 * @budget: how many packets driver is allowed to clean
4290 *
4291 * This function is used for legacy and MSI, NAPI mode
9a799d71 4292 **/
021230d4 4293static int ixgbe_poll(struct napi_struct *napi, int budget)
9a799d71 4294{
9a1a69ad 4295 struct ixgbe_q_vector *q_vector =
e8e9f696 4296 container_of(napi, struct ixgbe_q_vector, napi);
021230d4 4297 struct ixgbe_adapter *adapter = q_vector->adapter;
9a1a69ad 4298 int tx_clean_complete, work_done = 0;
9a799d71 4299
5dd2d332 4300#ifdef CONFIG_IXGBE_DCA
33cf09c9
AD
4301 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
4302 ixgbe_update_dca(q_vector);
bd0362dd
JC
4303#endif
4304
4a0b9ca0
PW
4305 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
4306 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
9a799d71 4307
9a1a69ad 4308 if (!tx_clean_complete)
d2c7ddd6
DM
4309 work_done = budget;
4310
53e52c72
DM
4311 /* If budget not fully consumed, exit the polling mode */
4312 if (work_done < budget) {
288379f0 4313 napi_complete(napi);
f7554a2b 4314 if (adapter->rx_itr_setting & 1)
f494e8fa 4315 ixgbe_set_itr(adapter);
d4f80882 4316 if (!test_bit(__IXGBE_DOWN, &adapter->state))
835462fc 4317 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
9a799d71 4318 }
9a799d71
AK
4319 return work_done;
4320}
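/*
 * The return value follows the usual NAPI contract: returning budget
 * keeps the poller scheduled, while finishing early means calling
 * napi_complete() and re-arming the interrupt. A bare-bones sketch
 * of that contract (clean_rx() and reenable_irq() are hypothetical
 * helpers standing in for the driver-specific pieces):
 *
 *   static int poll(struct napi_struct *napi, int budget)
 *   {
 *           int work_done = clean_rx(napi, budget);
 *
 *           if (work_done < budget) {
 *                   napi_complete(napi);
 *                   reenable_irq(napi);
 *           }
 *           return work_done;
 *   }
 */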
4321
4322/**
4323 * ixgbe_tx_timeout - Respond to a Tx Hang
4324 * @netdev: network interface device structure
4325 **/
4326static void ixgbe_tx_timeout(struct net_device *netdev)
4327{
4328 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4329
4330 /* Do the reset outside of interrupt context */
c83c6cbd 4331 ixgbe_tx_timeout_reset(adapter);
9a799d71
AK
4332}
4333
4df10466
JB
4334/**
4335 * ixgbe_set_rss_queues: Allocate queues for RSS
4336 * @adapter: board private structure to initialize
4337 *
4338 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
4339 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
4340 *
4341 **/
bc97114d
PWJ
4342static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
4343{
4344 bool ret = false;
0cefafad 4345 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
bc97114d
PWJ
4346
4347 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
0cefafad
JB
4348 f->mask = 0xF;
4349 adapter->num_rx_queues = f->indices;
4350 adapter->num_tx_queues = f->indices;
bc97114d
PWJ
4351 ret = true;
4352 } else {
bc97114d 4353 ret = false;
b9804972
JB
4354 }
4355
bc97114d
PWJ
4356 return ret;
4357}
4358
c4cf55e5
PWJ
4359/**
4360 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
4361 * @adapter: board private structure to initialize
4362 *
4363 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
4364 * to the original CPU that initiated the Tx session. This runs in addition
4365 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
4366 * Rx load across CPUs using RSS.
4367 *
4368 **/
e8e9f696 4369static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
c4cf55e5
PWJ
4370{
4371 bool ret = false;
4372 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
4373
4374 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
4375 f_fdir->mask = 0;
4376
4377 /* Flow Director must have RSS enabled */
03ecf91a
AD
4378 if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
4379 (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
c4cf55e5
PWJ
4380 adapter->num_tx_queues = f_fdir->indices;
4381 adapter->num_rx_queues = f_fdir->indices;
4382 ret = true;
4383 } else {
4384 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
c4cf55e5
PWJ
4385 }
4386 return ret;
4387}
4388
0331a832
YZ
4389#ifdef IXGBE_FCOE
4390/**
4391 * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
4392 * @adapter: board private structure to initialize
4393 *
4394 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
4395 * The ring feature mask is not used as a mask for FCoE, as it can take any 8
4396 * rx queues out of the max number of rx queues; instead, it is used as the
4397 * index of the first rx queue used by FCoE.
4398 *
4399 **/
4400static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
4401{
0331a832
YZ
4402 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4403
e5b64635
JF
4404 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4405 return false;
4406
e901acd6 4407 f->indices = min((int)num_online_cpus(), f->indices);
e5b64635 4408
e901acd6
JF
4409 adapter->num_rx_queues = 1;
4410 adapter->num_tx_queues = 1;
e5b64635 4411
e901acd6
JF
4412 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4413 e_info(probe, "FCoE enabled with RSS\n");
03ecf91a 4414 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
e901acd6
JF
4415 ixgbe_set_fdir_queues(adapter);
4416 else
4417 ixgbe_set_rss_queues(adapter);
e5b64635 4418 }
03ecf91a 4419
e901acd6
JF
4420 /* adding FCoE rx rings to the end */
4421 f->mask = adapter->num_rx_queues;
4422 adapter->num_rx_queues += f->indices;
4423 adapter->num_tx_queues += f->indices;
0331a832 4424
e5b64635
JF
4425 return true;
4426}
4427#endif /* IXGBE_FCOE */
4428
e901acd6
JF
4429/* Artificial max queue cap per traffic class in DCB mode */
4430#define DCB_QUEUE_CAP 8
4431
e5b64635
JF
4432#ifdef CONFIG_IXGBE_DCB
4433static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
4434{
e901acd6
JF
4435 int per_tc_q, q, i, offset = 0;
4436 struct net_device *dev = adapter->netdev;
4437 int tcs = netdev_get_num_tc(dev);
e5b64635 4438
e901acd6
JF
4439 if (!tcs)
4440 return false;
e5b64635 4441
e901acd6
JF
4442 /* Map queue offset and counts onto allocated tx queues */
4443 per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
4444 q = min((int)num_online_cpus(), per_tc_q);
8b1c0b24 4445
8b1c0b24 4446 for (i = 0; i < tcs; i++) {
e901acd6
JF
4447 netdev_set_prio_tc_map(dev, i, i);
4448 netdev_set_tc_queue(dev, i, q, offset);
4449 offset += q;
0331a832
YZ
4450 }
4451
e901acd6
JF
4452 adapter->num_tx_queues = q * tcs;
4453 adapter->num_rx_queues = q * tcs;
e5b64635
JF
4454
4455#ifdef IXGBE_FCOE
e901acd6
JF
4456 /* FCoE enabled queues require special configuration indexed
4457 * by feature specific indices and mask. Here we map FCoE
4458 * indices onto the DCB queue pairs allowing FCoE to own
4459 * configuration later.
e5b64635 4460 */
e901acd6
JF
4461 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4462 int tc;
4463 struct ixgbe_ring_feature *f =
4464 &adapter->ring_feature[RING_F_FCOE];
4465
4466 tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
4467 f->indices = dev->tc_to_txq[tc].count;
4468 f->mask = dev->tc_to_txq[tc].offset;
4469 }
e5b64635
JF
4470#endif
4471
e901acd6 4472 return true;
0331a832 4473}
e5b64635 4474#endif
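/*
 * Worked example for ixgbe_set_dcb_queues() above: with 4 traffic
 * classes, 16 online CPUs and dev->num_tx_queues = 64,
 *
 *   per_tc_q = min(64 / 4, DCB_QUEUE_CAP) = 8
 *   q        = min(16, 8)                 = 8
 *
 * so each TC gets an 8-queue block at offsets 0, 8, 16 and 24, for
 * 4 * 8 = 32 Tx and Rx queues in total.
 */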
0331a832 4475
1cdd1ec8
GR
4476/**
4477 * ixgbe_set_sriov_queues: Allocate queues for IOV use
4478 * @adapter: board private structure to initialize
4479 *
4480 * IOV doesn't actually use anything, so just NAK the
4481 * request for now and let the other queue routines
4482 * figure out what to do.
4483 */
4484static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
4485{
4486 return false;
4487}
4488
4df10466 4489/*
25985edc 4490 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
4df10466
JB
4491 * @adapter: board private structure to initialize
4492 *
4493 * This is the top level queue allocation routine. The order here is very
4494 * important, starting with the largest set of features turned on at once,
4495 * and ending with the smallest set of features. This way large combinations
4496 * can be allocated if they're turned on, and smaller combinations are the
4497 * fallthrough conditions.
4498 *
4499 **/
847f53ff 4500static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
bc97114d 4501{
1cdd1ec8
GR
4502 /* Start with base case */
4503 adapter->num_rx_queues = 1;
4504 adapter->num_tx_queues = 1;
4505 adapter->num_rx_pools = adapter->num_rx_queues;
4506 adapter->num_rx_queues_per_pool = 1;
4507
4508 if (ixgbe_set_sriov_queues(adapter))
847f53ff 4509 goto done;
1cdd1ec8 4510
bc97114d
PWJ
4511#ifdef CONFIG_IXGBE_DCB
4512 if (ixgbe_set_dcb_queues(adapter))
af22ab1b 4513 goto done;
bc97114d
PWJ
4514
4515#endif
e5b64635
JF
4516#ifdef IXGBE_FCOE
4517 if (ixgbe_set_fcoe_queues(adapter))
4518 goto done;
4519
4520#endif /* IXGBE_FCOE */
c4cf55e5
PWJ
4521 if (ixgbe_set_fdir_queues(adapter))
4522 goto done;
4523
bc97114d 4524 if (ixgbe_set_rss_queues(adapter))
af22ab1b
WF
4525 goto done;
4526
4527 /* fallback to base case */
4528 adapter->num_rx_queues = 1;
4529 adapter->num_tx_queues = 1;
4530
4531done:
847f53ff 4532 /* Notify the stack of the (possibly) reduced queue counts. */
f0796d5c 4533 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
847f53ff
BH
4534 return netif_set_real_num_rx_queues(adapter->netdev,
4535 adapter->num_rx_queues);
b9804972
JB
4536}
4537
021230d4 4538static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
e8e9f696 4539 int vectors)
021230d4
AV
4540{
4541 int err, vector_threshold;
4542
4543 /* We'll want at least 3 (vector_threshold):
4544 * 1) TxQ[0] Cleanup
4545 * 2) RxQ[0] Cleanup
4546 * 3) Other (Link Status Change, etc.)
4547 * 4) TCP Timer (optional)
4548 */
4549 vector_threshold = MIN_MSIX_COUNT;
4550
4551 /* The more we get, the more we will assign to Tx/Rx Cleanup
4552 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
4553 * Right now, we simply care about how many we'll get; we'll
4554 * set them up later while requesting irq's.
4555 */
4556 while (vectors >= vector_threshold) {
4557 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
e8e9f696 4558 vectors);
021230d4
AV
4559 if (!err) /* Success in acquiring all requested vectors. */
4560 break;
4561 else if (err < 0)
4562 vectors = 0; /* Nasty failure, quit now */
4563 else /* err == number of vectors we should try again with */
4564 vectors = err;
4565 }
4566
4567 if (vectors < vector_threshold) {
4568 /* Can't allocate enough MSI-X interrupts? Oh well.
4569 * This just means we'll go with either a single MSI
4570 * vector or fall back to legacy interrupts.
4571 */
849c4542
ET
4572 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4573 "Unable to allocate MSI-X interrupts\n");
021230d4
AV
4574 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
4575 kfree(adapter->msix_entries);
4576 adapter->msix_entries = NULL;
021230d4
AV
4577 } else {
4578 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
eb7f139c
PWJ
4579 /*
4580 * Adjust for only the vectors we'll use, which is minimum
4581 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
4582 * vectors we were allocated.
4583 */
4584 adapter->num_msix_vectors = min(vectors,
e8e9f696 4585 adapter->max_msix_q_vectors + NON_Q_VECTORS);
021230d4
AV
4586 }
4587}
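/*
 * The retry loop above leans on the pci_enable_msix() return
 * convention of this kernel generation: 0 means every requested
 * vector was granted, a negative value is a hard failure, and a
 * positive value is how many vectors the platform can actually
 * supply, which becomes the next request size:
 *
 *   err = pci_enable_msix(pdev, entries, vectors);
 *   if (!err)
 *           break;                          // got all of them
 *   vectors = (err < 0) ? 0 : err;          // shrink and retry
 */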
4588
021230d4 4589/**
bc97114d 4590 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
021230d4
AV
4591 * @adapter: board private structure to initialize
4592 *
bc97114d
PWJ
4593 * Cache the descriptor ring offsets for RSS to the assigned rings.
4594 *
021230d4 4595 **/
bc97114d 4596static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
021230d4 4597{
bc97114d 4598 int i;
bc97114d 4599
9d6b758f
AD
4600 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
4601 return false;
bc97114d 4602
9d6b758f
AD
4603 for (i = 0; i < adapter->num_rx_queues; i++)
4604 adapter->rx_ring[i]->reg_idx = i;
4605 for (i = 0; i < adapter->num_tx_queues; i++)
4606 adapter->tx_ring[i]->reg_idx = i;
4607
4608 return true;
bc97114d
PWJ
4609}
4610
4611#ifdef CONFIG_IXGBE_DCB
e5b64635
JF
4612
4613/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
b32c8dcc
JF
4614static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
4615 unsigned int *tx, unsigned int *rx)
e5b64635
JF
4616{
4617 struct net_device *dev = adapter->netdev;
4618 struct ixgbe_hw *hw = &adapter->hw;
4619 u8 num_tcs = netdev_get_num_tc(dev);
4620
4621 *tx = 0;
4622 *rx = 0;
4623
4624 switch (hw->mac.type) {
4625 case ixgbe_mac_82598EB:
aba70d5e
JF
4626 *tx = tc << 2;
4627 *rx = tc << 3;
e5b64635
JF
4628 break;
4629 case ixgbe_mac_82599EB:
4630 case ixgbe_mac_X540:
4631 if (num_tcs == 8) {
4632 if (tc < 3) {
4633 *tx = tc << 5;
4634 *rx = tc << 4;
4635 } else if (tc < 5) {
4636 *tx = ((tc + 2) << 4);
4637 *rx = tc << 4;
4638 } else if (tc < num_tcs) {
4639 *tx = ((tc + 8) << 3);
4640 *rx = tc << 4;
4641 }
4642 } else if (num_tcs == 4) {
4643 *rx = tc << 5;
4644 switch (tc) {
4645 case 0:
4646 *tx = 0;
4647 break;
4648 case 1:
4649 *tx = 64;
4650 break;
4651 case 2:
4652 *tx = 96;
4653 break;
4654 case 3:
4655 *tx = 112;
4656 break;
4657 default:
4658 break;
4659 }
4660 }
4661 break;
4662 default:
4663 break;
4664 }
4665}
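/*
 * Worked examples for the mapping above: on 82599/X540 in 8-TC mode,
 * tc 2 starts at Tx register index 2 << 5 = 64 and Rx index
 * 2 << 4 = 32, while tc 4 takes the (4 + 2) << 4 = 96 Tx block with
 * Rx index 4 << 4 = 64. On 82598 the blocks are uniform: tc 3 maps
 * to Tx 3 << 2 = 12 and Rx 3 << 3 = 24.
 */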
4666
bc97114d
PWJ
4667/**
4668 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
4669 * @adapter: board private structure to initialize
4670 *
4671 * Cache the descriptor ring offsets for DCB to the assigned rings.
4672 *
4673 **/
4674static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4675{
e5b64635
JF
4676 struct net_device *dev = adapter->netdev;
4677 int i, j, k;
4678 u8 num_tcs = netdev_get_num_tc(dev);
bc97114d 4679
8b1c0b24 4680 if (!num_tcs)
bd508178 4681 return false;
f92ef202 4682
e5b64635
JF
4683 for (i = 0, k = 0; i < num_tcs; i++) {
4684 unsigned int tx_s, rx_s;
4685 u16 count = dev->tc_to_txq[i].count;
4686
4687 ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
4688 for (j = 0; j < count; j++, k++) {
4689 adapter->tx_ring[k]->reg_idx = tx_s + j;
4690 adapter->rx_ring[k]->reg_idx = rx_s + j;
4691 adapter->tx_ring[k]->dcb_tc = i;
4692 adapter->rx_ring[k]->dcb_tc = i;
021230d4 4693 }
021230d4 4694 }
e5b64635
JF
4695
4696 return true;
bc97114d
PWJ
4697}
4698#endif
4699
c4cf55e5
PWJ
4700/**
4701 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
4702 * @adapter: board private structure to initialize
4703 *
4704 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
4705 *
4706 **/
e8e9f696 4707static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
c4cf55e5
PWJ
4708{
4709 int i;
4710 bool ret = false;
4711
03ecf91a
AD
4712 if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
4713 (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
c4cf55e5 4714 for (i = 0; i < adapter->num_rx_queues; i++)
4a0b9ca0 4715 adapter->rx_ring[i]->reg_idx = i;
c4cf55e5 4716 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 4717 adapter->tx_ring[i]->reg_idx = i;
c4cf55e5
PWJ
4718 ret = true;
4719 }
4720
4721 return ret;
4722}
4723
0331a832
YZ
4724#ifdef IXGBE_FCOE
4725/**
4726 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
4727 * @adapter: board private structure to initialize
4728 *
4729 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
4730 *
4731 */
4732static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4733{
0331a832 4734 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
bf29ee6c
AD
4735 int i;
4736 u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
4737
4738 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4739 return false;
0331a832 4740
bf29ee6c 4741 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
03ecf91a 4742 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
bf29ee6c
AD
4743 ixgbe_cache_ring_fdir(adapter);
4744 else
4745 ixgbe_cache_ring_rss(adapter);
8faa2a78 4746
bf29ee6c
AD
4747 fcoe_rx_i = f->mask;
4748 fcoe_tx_i = f->mask;
0331a832 4749 }
bf29ee6c
AD
4750 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4751 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4752 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4753 }
4754 return true;
0331a832
YZ
4755}
4756
4757#endif /* IXGBE_FCOE */
1cdd1ec8
GR
4758/**
4759 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
4760 * @adapter: board private structure to initialize
4761 *
4762 * SR-IOV doesn't use any descriptor rings but changes the default if
4763 * no other mapping is used.
4764 *
4765 */
4766static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
4767{
4a0b9ca0
PW
4768 adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
4769 adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
1cdd1ec8
GR
4770 if (adapter->num_vfs)
4771 return true;
4772 else
4773 return false;
4774}
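/*
 * Example: with 32 VFs enabled the PF's single ring pair lands at
 * register index 32 * 2 = 64, i.e. just past the VF queues. The
 * factor of two is read here as the two-queues-per-pool layout of
 * 64-pool VMDq mode (an interpretation; the code does not spell it
 * out).
 */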
4775
bc97114d
PWJ
4776/**
4777 * ixgbe_cache_ring_register - Descriptor ring to register mapping
4778 * @adapter: board private structure to initialize
4779 *
4780 * Once we know the feature-set enabled for the device, we'll cache
4781 * the register offset the descriptor ring is assigned to.
4782 *
4783 * Note, the order the various feature calls is important. It must start with
4784 * the "most" features enabled at the same time, then trickle down to the
4785 * fewest features turned on at once.
4786 **/
4787static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4788{
4789 /* start with default case */
4a0b9ca0
PW
4790 adapter->rx_ring[0]->reg_idx = 0;
4791 adapter->tx_ring[0]->reg_idx = 0;
bc97114d 4792
1cdd1ec8
GR
4793 if (ixgbe_cache_ring_sriov(adapter))
4794 return;
4795
e5b64635
JF
4796#ifdef CONFIG_IXGBE_DCB
4797 if (ixgbe_cache_ring_dcb(adapter))
4798 return;
4799#endif
4800
0331a832
YZ
4801#ifdef IXGBE_FCOE
4802 if (ixgbe_cache_ring_fcoe(adapter))
4803 return;
0331a832 4804#endif /* IXGBE_FCOE */
bc97114d 4805
c4cf55e5
PWJ
4806 if (ixgbe_cache_ring_fdir(adapter))
4807 return;
4808
bc97114d
PWJ
4809 if (ixgbe_cache_ring_rss(adapter))
4810 return;
021230d4
AV
4811}
4812
9a799d71
AK
4813/**
4814 * ixgbe_alloc_queues - Allocate memory for all rings
4815 * @adapter: board private structure to initialize
4816 *
4817 * We allocate one ring per queue at run-time since we don't know the
4df10466
JB
4818 * number of queues at compile-time. This works for multiqueue as
4819 * well as for a single queue.
9a799d71 4820 **/
2f90b865 4821static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
9a799d71 4822{
e2ddeba9 4823 int rx = 0, tx = 0, nid = adapter->node;
9a799d71 4824
e2ddeba9
ED
4825 if (nid < 0 || !node_online(nid))
4826 nid = first_online_node;
4827
4828 for (; tx < adapter->num_tx_queues; tx++) {
4829 struct ixgbe_ring *ring;
4830
4831 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4a0b9ca0 4832 if (!ring)
e2ddeba9 4833 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4a0b9ca0 4834 if (!ring)
e2ddeba9 4835 goto err_allocation;
4a0b9ca0 4836 ring->count = adapter->tx_ring_count;
e2ddeba9
ED
4837 ring->queue_index = tx;
4838 ring->numa_node = nid;
b6ec895e 4839 ring->dev = &adapter->pdev->dev;
fc77dc3c 4840 ring->netdev = adapter->netdev;
4a0b9ca0 4841
e2ddeba9 4842 adapter->tx_ring[tx] = ring;
021230d4 4843 }
b9804972 4844
e2ddeba9
ED
4845 for (; rx < adapter->num_rx_queues; rx++) {
4846 struct ixgbe_ring *ring;
4a0b9ca0 4847
e2ddeba9 4848 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4a0b9ca0 4849 if (!ring)
e2ddeba9 4850 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4a0b9ca0 4851 if (!ring)
e2ddeba9
ED
4852 goto err_allocation;
4853 ring->count = adapter->rx_ring_count;
4854 ring->queue_index = rx;
4855 ring->numa_node = nid;
b6ec895e 4856 ring->dev = &adapter->pdev->dev;
fc77dc3c 4857 ring->netdev = adapter->netdev;
4a0b9ca0 4858
e2ddeba9 4859 adapter->rx_ring[rx] = ring;
021230d4
AV
4860 }
4861
4862 ixgbe_cache_ring_register(adapter);
4863
4864 return 0;
4865
e2ddeba9
ED
4866err_allocation:
4867 while (tx)
4868 kfree(adapter->tx_ring[--tx]);
4869
4870 while (rx)
4871 kfree(adapter->rx_ring[--rx]);
021230d4
AV
4872 return -ENOMEM;
4873}
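/*
 * The allocation idiom above (try kzalloc_node() on the adapter's
 * node, fall back to plain kzalloc()) is best-effort NUMA placement:
 * ring structures land on the local node when memory is available
 * there, but allocation never fails just because that node is
 * exhausted:
 *
 *   ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
 *   if (!ring)
 *           ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 */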
4874
4875/**
4876 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
4877 * @adapter: board private structure to initialize
4878 *
4879 * Attempt to configure the interrupts using the best available
4880 * capabilities of the hardware and the kernel.
4881 **/
feea6a57 4882static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
021230d4 4883{
8be0e467 4884 struct ixgbe_hw *hw = &adapter->hw;
021230d4
AV
4885 int err = 0;
4886 int vector, v_budget;
4887
4888 /*
4889 * It's easy to be greedy for MSI-X vectors, but it really
4890 * doesn't do us much good if we have a lot more vectors
4891 * than CPUs. So let's be conservative and only ask for
342bde1b 4892 * (roughly) the same number of vectors as there are CPUs.
021230d4
AV
4893 */
4894 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
e8e9f696 4895 (int)num_online_cpus()) + NON_Q_VECTORS;
021230d4
AV
4896
4897 /*
4898 * At the same time, hardware can only support a maximum of
8be0e467
PW
4899 * hw.mac->max_msix_vectors vectors. With features
4900 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
4901 * descriptor queues supported by our device. Thus, we cap it off in
4902 * those rare cases where the cpu count also exceeds our vector limit.
021230d4 4903 */
8be0e467 4904 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
021230d4
AV
4905
4906 /* A failure in MSI-X entry allocation isn't fatal, but it does
4907 * mean we disable MSI-X capabilities of the adapter. */
4908 adapter->msix_entries = kcalloc(v_budget,
e8e9f696 4909 sizeof(struct msix_entry), GFP_KERNEL);
7a921c93
AD
4910 if (adapter->msix_entries) {
4911 for (vector = 0; vector < v_budget; vector++)
4912 adapter->msix_entries[vector].entry = vector;
021230d4 4913
7a921c93 4914 ixgbe_acquire_msix_vectors(adapter, v_budget);
021230d4 4915
7a921c93
AD
4916 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
4917 goto out;
4918 }
26d27844 4919
7a921c93
AD
4920 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4921 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
03ecf91a 4922 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
45b9f509 4923 e_err(probe,
03ecf91a 4924 "ATR is not supported while multiple "
45b9f509
AD
4925 "queues are disabled. Disabling Flow Director\n");
4926 }
c4cf55e5 4927 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
c4cf55e5 4928 adapter->atr_sample_rate = 0;
1cdd1ec8
GR
4929 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
4930 ixgbe_disable_sriov(adapter);
4931
847f53ff
BH
4932 err = ixgbe_set_num_queues(adapter);
4933 if (err)
4934 return err;
021230d4 4935
021230d4
AV
4936 err = pci_enable_msi(adapter->pdev);
4937 if (!err) {
4938 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
4939 } else {
849c4542
ET
4940 netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
4941 "Unable to allocate MSI interrupt, "
4942 "falling back to legacy. Error: %d\n", err);
021230d4
AV
4943 /* reset err */
4944 err = 0;
4945 }
4946
4947out:
021230d4
AV
4948 return err;
4949}
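/*
 * Editor's note: the MSI-X budget above reduces to two min() clamps. A
 * hedged standalone sketch with hypothetical names; e.g. 16 Rx + 16 Tx
 * queues on an 8-CPU box with one non-queue vector asks for 9 vectors,
 * not 33.
 */
static int ixgbe_example_v_budget(int rxq, int txq, int ncpus,
				  int max_msix, int non_q)
{
	int v = min(rxq + txq, ncpus) + non_q;	/* no more than CPUs */

	return min(v, max_msix);		/* hardware vector cap */
}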
4950
7a921c93
AD
4951/**
4952 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
4953 * @adapter: board private structure to initialize
4954 *
4955 * We allocate one q_vector per queue interrupt. If allocation fails we
4956 * return -ENOMEM.
4957 **/
4958static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
4959{
4960 int q_idx, num_q_vectors;
4961 struct ixgbe_q_vector *q_vector;
7a921c93
AD
4962 int (*poll)(struct napi_struct *, int);
4963
4964 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4965 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
91281fd3 4966 poll = &ixgbe_clean_rxtx_many;
7a921c93
AD
4967 } else {
4968 num_q_vectors = 1;
7a921c93
AD
4969 poll = &ixgbe_poll;
4970 }
4971
4972 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1a6c14a2 4973 q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
e8e9f696 4974 GFP_KERNEL, adapter->node);
1a6c14a2
JB
4975 if (!q_vector)
4976 q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
e8e9f696 4977 GFP_KERNEL);
7a921c93
AD
4978 if (!q_vector)
4979 goto err_out;
4980 q_vector->adapter = adapter;
f7554a2b
NS
4981 if (q_vector->txr_count && !q_vector->rxr_count)
4982 q_vector->eitr = adapter->tx_eitr_param;
4983 else
4984 q_vector->eitr = adapter->rx_eitr_param;
fe49f04a 4985 q_vector->v_idx = q_idx;
91281fd3 4986 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
7a921c93
AD
4987 adapter->q_vector[q_idx] = q_vector;
4988 }
4989
4990 return 0;
4991
4992err_out:
4993 while (q_idx) {
4994 q_idx--;
4995 q_vector = adapter->q_vector[q_idx];
4996 netif_napi_del(&q_vector->napi);
4997 kfree(q_vector);
4998 adapter->q_vector[q_idx] = NULL;
4999 }
5000 return -ENOMEM;
5001}
5002
5003/**
5004 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
5005 * @adapter: board private structure to initialize
5006 *
5007 * This function frees the memory allocated to the q_vectors. In addition,
5008 * if NAPI is enabled it will delete any references to the NAPI struct
5009 * prior to freeing the q_vector.
5010 **/
5011static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
5012{
5013 int q_idx, num_q_vectors;
7a921c93 5014
91281fd3 5015 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
7a921c93 5016 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
91281fd3 5017 else
7a921c93 5018 num_q_vectors = 1;
7a921c93
AD
5019
5020 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
5021 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
7a921c93 5022 adapter->q_vector[q_idx] = NULL;
91281fd3 5023 netif_napi_del(&q_vector->napi);
7a921c93
AD
5024 kfree(q_vector);
5025 }
5026}
5027
7b25cdba 5028static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
021230d4
AV
5029{
5030 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5031 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
5032 pci_disable_msix(adapter->pdev);
5033 kfree(adapter->msix_entries);
5034 adapter->msix_entries = NULL;
5035 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
5036 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
5037 pci_disable_msi(adapter->pdev);
5038 }
021230d4
AV
5039}
5040
5041/**
5042 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
5043 * @adapter: board private structure to initialize
5044 *
5045 * We determine which interrupt scheme to use based on...
5046 * - Kernel support (MSI, MSI-X)
5047 * - which can be user-defined (via MODULE_PARAM)
5048 * - Hardware queue count (num_*_queues)
5049 * - defined by miscellaneous hardware support/features (RSS, etc.)
5050 **/
2f90b865 5051int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
021230d4
AV
5052{
5053 int err;
5054
5055 /* Number of supported queues */
847f53ff
BH
5056 err = ixgbe_set_num_queues(adapter);
5057 if (err)
5058 return err;
021230d4 5059
021230d4
AV
5060 err = ixgbe_set_interrupt_capability(adapter);
5061 if (err) {
849c4542 5062 e_dev_err("Unable to setup interrupt capabilities\n");
021230d4 5063 goto err_set_interrupt;
9a799d71
AK
5064 }
5065
7a921c93
AD
5066 err = ixgbe_alloc_q_vectors(adapter);
5067 if (err) {
849c4542 5068 e_dev_err("Unable to allocate memory for queue vectors\n");
7a921c93
AD
5069 goto err_alloc_q_vectors;
5070 }
5071
5072 err = ixgbe_alloc_queues(adapter);
5073 if (err) {
849c4542 5074 e_dev_err("Unable to allocate memory for queues\n");
7a921c93
AD
5075 goto err_alloc_queues;
5076 }
5077
849c4542 5078 e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
396e799c
ET
5079 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
5080 adapter->num_rx_queues, adapter->num_tx_queues);
021230d4
AV
5081
5082 set_bit(__IXGBE_DOWN, &adapter->state);
5083
9a799d71 5084 return 0;
021230d4 5085
7a921c93
AD
5086err_alloc_queues:
5087 ixgbe_free_q_vectors(adapter);
5088err_alloc_q_vectors:
5089 ixgbe_reset_interrupt_capability(adapter);
021230d4 5090err_set_interrupt:
7a921c93
AD
5091 return err;
5092}
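/*
 * Editor's note: the error labels above unwind in strict reverse order
 * of setup (interrupt capability -> q_vectors -> queues), the usual
 * goto-unwind idiom. Sketch with hypothetical names:
 *
 *	if (setup_a())	goto err_a;
 *	if (setup_b())	goto err_b;
 *	return 0;
 * err_b:
 *	teardown_a();
 * err_a:
 *	return err;
 */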
5093
5094/**
5095 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
5096 * @adapter: board private structure to clear interrupt scheme on
5097 *
5098 * We go through and clear interrupt-specific resources and reset the
5099 * structure to pre-load conditions.
5100 **/
5101void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
5102{
4a0b9ca0
PW
5103 int i;
5104
5105 for (i = 0; i < adapter->num_tx_queues; i++) {
5106 kfree(adapter->tx_ring[i]);
5107 adapter->tx_ring[i] = NULL;
5108 }
5109 for (i = 0; i < adapter->num_rx_queues; i++) {
1a51502b
ED
5110 struct ixgbe_ring *ring = adapter->rx_ring[i];
5111
5112 /* ixgbe_get_stats64() might access this ring, we must wait
5113 * a grace period before freeing it.
5114 */
bcec8b65 5115 kfree_rcu(ring, rcu);
4a0b9ca0
PW
5116 adapter->rx_ring[i] = NULL;
5117 }
7a921c93 5118
b8eb3a10
DS
5119 adapter->num_tx_queues = 0;
5120 adapter->num_rx_queues = 0;
5121
7a921c93
AD
5122 ixgbe_free_q_vectors(adapter);
5123 ixgbe_reset_interrupt_capability(adapter);
9a799d71
AK
5124}
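/*
 * Editor's note: kfree_rcu() above defers the actual free until an RCU
 * grace period has elapsed, so a concurrent lockless reader of the ring
 * (the driver's 64-bit stats path) of the form
 *
 *	rcu_read_lock();
 *	ring = adapter->rx_ring[i];
 *	if (ring)
 *		... read ring->stats ...
 *	rcu_read_unlock();
 *
 * can never dereference freed memory. Sketch only, assuming the reader
 * runs entirely under rcu_read_lock(); the real reader lives in
 * ixgbe_get_stats64().
 */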
5125
5126/**
5127 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
5128 * @adapter: board private structure to initialize
5129 *
5130 * ixgbe_sw_init initializes the Adapter private data structure.
5131 * Fields are initialized based on PCI device information and
5132 * OS network device settings (MTU size).
5133 **/
5134static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
5135{
5136 struct ixgbe_hw *hw = &adapter->hw;
5137 struct pci_dev *pdev = adapter->pdev;
9a713e7c 5138 struct net_device *dev = adapter->netdev;
021230d4 5139 unsigned int rss;
7a6b6f51 5140#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
5141 int j;
5142 struct tc_configuration *tc;
5143#endif
16b61beb 5144 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
021230d4 5145
c44ade9e
JB
5146 /* PCI config space info */
5147
5148 hw->vendor_id = pdev->vendor;
5149 hw->device_id = pdev->device;
5150 hw->revision_id = pdev->revision;
5151 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5152 hw->subsystem_device_id = pdev->subsystem_device;
5153
021230d4
AV
5154 /* Set capability flags */
5155 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
5156 adapter->ring_feature[RING_F_RSS].indices = rss;
5157 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
bd508178
AD
5158 switch (hw->mac.type) {
5159 case ixgbe_mac_82598EB:
bf069c97
DS
5160 if (hw->device_id == IXGBE_DEV_ID_82598AT)
5161 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
e8e26350 5162 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
bd508178
AD
5163 break;
5164 case ixgbe_mac_82599EB:
b93a2226 5165 case ixgbe_mac_X540:
e8e26350 5166 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
0c19d6af
PWJ
5167 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
5168 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
119fc60a
MC
5169 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
5170 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
45b9f509
AD
5171 /* n-tuple support exists, always init our spinlock */
5172 spin_lock_init(&adapter->fdir_perfect_lock);
5173 /* Flow Director hash filters enabled */
5174 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
5175 adapter->atr_sample_rate = 20;
c4cf55e5 5176 adapter->ring_feature[RING_F_FDIR].indices =
e8e9f696 5177 IXGBE_MAX_FDIR_INDICES;
c04f6ca8 5178 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
eacd73f7 5179#ifdef IXGBE_FCOE
0d551589
YZ
5180 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
5181 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
5182 adapter->ring_feature[RING_F_FCOE].indices = 0;
61a0f421 5183#ifdef CONFIG_IXGBE_DCB
6ee16520
YZ
5184 /* Default traffic class to use for FCoE */
5185 adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
56075a98 5186 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
61a0f421 5187#endif
eacd73f7 5188#endif /* IXGBE_FCOE */
bd508178
AD
5189 break;
5190 default:
5191 break;
f8212f97 5192 }
2f90b865 5193
7a6b6f51 5194#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
5195 /* Configure DCB traffic classes */
5196 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
5197 tc = &adapter->dcb_cfg.tc_config[j];
5198 tc->path[DCB_TX_CONFIG].bwg_id = 0;
5199 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
5200 tc->path[DCB_RX_CONFIG].bwg_id = 0;
5201 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
5202 tc->dcb_pfc = pfc_disabled;
5203 }
5204 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
5205 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
264857b8 5206 adapter->dcb_cfg.pfc_mode_enable = false;
2f90b865 5207 adapter->dcb_set_bitmap = 0x00;
3032309b 5208 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
2f90b865 5209 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
e5b64635 5210 MAX_TRAFFIC_CLASS);
2f90b865
AD
5211
5212#endif
9a799d71
AK
5213
5214 /* default flow control settings */
cd7664f6 5215 hw->fc.requested_mode = ixgbe_fc_full;
71fd570b 5216 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
264857b8
PWJ
5217#ifdef CONFIG_DCB
5218 adapter->last_lfc_mode = hw->fc.current_mode;
5219#endif
16b61beb
JF
5220 hw->fc.high_water = FC_HIGH_WATER(max_frame);
5221 hw->fc.low_water = FC_LOW_WATER(max_frame);
2b9ade93
JB
5222 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
5223 hw->fc.send_xon = true;
71fd570b 5224 hw->fc.disable_fc_autoneg = false;
9a799d71 5225
30efa5a3 5226 /* enable itr by default in dynamic mode */
f7554a2b
NS
5227 adapter->rx_itr_setting = 1;
5228 adapter->rx_eitr_param = 20000;
5229 adapter->tx_itr_setting = 1;
5230 adapter->tx_eitr_param = 10000;
30efa5a3
JB
5231
5232 /* set defaults for eitr in MegaBytes */
5233 adapter->eitr_low = 10;
5234 adapter->eitr_high = 20;
5235
5236 /* set default ring sizes */
5237 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
5238 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
5239
9a799d71 5240 /* initialize eeprom parameters */
c44ade9e 5241 if (ixgbe_init_eeprom_params_generic(hw)) {
849c4542 5242 e_dev_err("EEPROM initialization failed\n");
9a799d71
AK
5243 return -EIO;
5244 }
5245
021230d4 5246 /* enable rx csum by default */
9a799d71
AK
5247 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
5248
1a6c14a2
JB
5249 /* get assigned NUMA node */
5250 adapter->node = dev_to_node(&pdev->dev);
5251
9a799d71
AK
5252 set_bit(__IXGBE_DOWN, &adapter->state);
5253
5254 return 0;
5255}
5256
5257/**
5258 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
3a581073 5259 * @tx_ring: tx descriptor ring (for a specific queue) to setup
9a799d71
AK
5260 *
5261 * Return 0 on success, negative on failure
5262 **/
b6ec895e 5263int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
9a799d71 5264{
b6ec895e 5265 struct device *dev = tx_ring->dev;
9a799d71
AK
5266 int size;
5267
3a581073 5268 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
89bf67f1 5269 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
1a6c14a2 5270 if (!tx_ring->tx_buffer_info)
89bf67f1 5271 tx_ring->tx_buffer_info = vzalloc(size);
e01c31a5
JB
5272 if (!tx_ring->tx_buffer_info)
5273 goto err;
9a799d71
AK
5274
5275 /* round up to nearest 4K */
12207e49 5276 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3a581073 5277 tx_ring->size = ALIGN(tx_ring->size, 4096);
9a799d71 5278
b6ec895e 5279 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1b507730 5280 &tx_ring->dma, GFP_KERNEL);
e01c31a5
JB
5281 if (!tx_ring->desc)
5282 goto err;
9a799d71 5283
3a581073
JB
5284 tx_ring->next_to_use = 0;
5285 tx_ring->next_to_clean = 0;
5286 tx_ring->work_limit = tx_ring->count;
9a799d71 5287 return 0;
e01c31a5
JB
5288
5289err:
5290 vfree(tx_ring->tx_buffer_info);
5291 tx_ring->tx_buffer_info = NULL;
b6ec895e 5292 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
e01c31a5 5293 return -ENOMEM;
9a799d71
AK
5294}
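/*
 * Editor's note: worked example of the 4K rounding above, assuming the
 * 16-byte advanced descriptor size:
 *
 *	ALIGN(x, a) == ((x + a - 1) & ~(a - 1))   for power-of-two a
 *
 *	512 descriptors * 16 B = 8192 -> stays 8192 (two 4K pages)
 *	100 descriptors * 16 B = 1600 -> rounds up to 4096
 */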
5295
69888674
AD
5296/**
5297 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
5298 * @adapter: board private structure
5299 *
5300 * If this function returns with an error, then it's possible one or
5301 * more of the rings is populated (while the rest are not). It is the
5302 * callers duty to clean those orphaned rings.
5303 *
5304 * Return 0 on success, negative on failure
5305 **/
5306static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5307{
5308 int i, err = 0;
5309
5310 for (i = 0; i < adapter->num_tx_queues; i++) {
b6ec895e 5311 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
69888674
AD
5312 if (!err)
5313 continue;
396e799c 5314 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
69888674
AD
5315 break;
5316 }
5317
5318 return err;
5319}
5320
9a799d71
AK
5321/**
5322 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
3a581073 5323 * @rx_ring: rx descriptor ring (for a specific queue) to setup
9a799d71
AK
5324 *
5325 * Returns 0 on success, negative on failure
5326 **/
b6ec895e 5327int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
9a799d71 5328{
b6ec895e 5329 struct device *dev = rx_ring->dev;
021230d4 5330 int size;
9a799d71 5331
3a581073 5332 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
89bf67f1 5333 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
1a6c14a2 5334 if (!rx_ring->rx_buffer_info)
89bf67f1 5335 rx_ring->rx_buffer_info = vzalloc(size);
b6ec895e
AD
5336 if (!rx_ring->rx_buffer_info)
5337 goto err;
9a799d71 5338
9a799d71 5339 /* Round up to nearest 4K */
3a581073
JB
5340 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5341 rx_ring->size = ALIGN(rx_ring->size, 4096);
9a799d71 5342
b6ec895e 5343 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1b507730 5344 &rx_ring->dma, GFP_KERNEL);
9a799d71 5345
b6ec895e
AD
5346 if (!rx_ring->desc)
5347 goto err;
9a799d71 5348
3a581073
JB
5349 rx_ring->next_to_clean = 0;
5350 rx_ring->next_to_use = 0;
9a799d71
AK
5351
5352 return 0;
b6ec895e
AD
5353err:
5354 vfree(rx_ring->rx_buffer_info);
5355 rx_ring->rx_buffer_info = NULL;
5356 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
177db6ff 5357 return -ENOMEM;
9a799d71
AK
5358}
5359
69888674
AD
5360/**
5361 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
5362 * @adapter: board private structure
5363 *
5364 * If this function returns with an error, then it's possible one or
5365 * more of the rings is populated (while the rest are not). It is the
5366 * caller's duty to clean those orphaned rings.
5367 *
5368 * Return 0 on success, negative on failure
5369 **/
69888674
AD
5370static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5371{
5372 int i, err = 0;
5373
5374 for (i = 0; i < adapter->num_rx_queues; i++) {
b6ec895e 5375 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
69888674
AD
5376 if (!err)
5377 continue;
396e799c 5378 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
69888674
AD
5379 break;
5380 }
5381
5382 return err;
5383}
5384
9a799d71
AK
5385/**
5386 * ixgbe_free_tx_resources - Free Tx Resources per Queue
9a799d71
AK
5387 * @tx_ring: Tx descriptor ring for a specific queue
5388 *
5389 * Free all transmit software resources
5390 **/
b6ec895e 5391void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
9a799d71 5392{
b6ec895e 5393 ixgbe_clean_tx_ring(tx_ring);
9a799d71
AK
5394
5395 vfree(tx_ring->tx_buffer_info);
5396 tx_ring->tx_buffer_info = NULL;
5397
b6ec895e
AD
5398 /* if not set, then don't free */
5399 if (!tx_ring->desc)
5400 return;
5401
5402 dma_free_coherent(tx_ring->dev, tx_ring->size,
5403 tx_ring->desc, tx_ring->dma);
9a799d71
AK
5404
5405 tx_ring->desc = NULL;
5406}
5407
5408/**
5409 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
5410 * @adapter: board private structure
5411 *
5412 * Free all transmit software resources
5413 **/
5414static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5415{
5416 int i;
5417
5418 for (i = 0; i < adapter->num_tx_queues; i++)
4a0b9ca0 5419 if (adapter->tx_ring[i]->desc)
b6ec895e 5420 ixgbe_free_tx_resources(adapter->tx_ring[i]);
9a799d71
AK
5421}
5422
5423/**
b4617240 5424 * ixgbe_free_rx_resources - Free Rx Resources
9a799d71
AK
5425 * @rx_ring: ring to clean the resources from
5426 *
5427 * Free all receive software resources
5428 **/
b6ec895e 5429void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
9a799d71 5430{
b6ec895e 5431 ixgbe_clean_rx_ring(rx_ring);
9a799d71
AK
5432
5433 vfree(rx_ring->rx_buffer_info);
5434 rx_ring->rx_buffer_info = NULL;
5435
b6ec895e
AD
5436 /* if not set, then don't free */
5437 if (!rx_ring->desc)
5438 return;
5439
5440 dma_free_coherent(rx_ring->dev, rx_ring->size,
5441 rx_ring->desc, rx_ring->dma);
9a799d71
AK
5442
5443 rx_ring->desc = NULL;
5444}
5445
5446/**
5447 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
5448 * @adapter: board private structure
5449 *
5450 * Free all receive software resources
5451 **/
5452static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5453{
5454 int i;
5455
5456 for (i = 0; i < adapter->num_rx_queues; i++)
4a0b9ca0 5457 if (adapter->rx_ring[i]->desc)
b6ec895e 5458 ixgbe_free_rx_resources(adapter->rx_ring[i]);
9a799d71
AK
5459}
5460
9a799d71
AK
5461/**
5462 * ixgbe_change_mtu - Change the Maximum Transfer Unit
5463 * @netdev: network interface device structure
5464 * @new_mtu: new MTU value (the maximum frame size is derived from it)
5465 *
5466 * Returns 0 on success, negative on failure
5467 **/
5468static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5469{
5470 struct ixgbe_adapter *adapter = netdev_priv(netdev);
16b61beb 5471 struct ixgbe_hw *hw = &adapter->hw;
9a799d71
AK
5472 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5473
42c783c5 5474 /* MTU < 68 is an error and causes problems on some kernels */
e9f98072
GR
5475 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED &&
5476 hw->mac.type != ixgbe_mac_X540) {
5477 if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
5478 return -EINVAL;
5479 } else {
5480 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
5481 return -EINVAL;
5482 }
9a799d71 5483
396e799c 5484 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
021230d4 5485 /* must set new MTU before calling down or up */
9a799d71
AK
5486 netdev->mtu = new_mtu;
5487
16b61beb
JF
5488 hw->fc.high_water = FC_HIGH_WATER(max_frame);
5489 hw->fc.low_water = FC_LOW_WATER(max_frame);
5490
d4f80882
AV
5491 if (netif_running(netdev))
5492 ixgbe_reinit_locked(adapter);
9a799d71
AK
5493
5494 return 0;
5495}
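/*
 * Editor's note: worked example of the frame-size check above:
 * max_frame = new_mtu + ETH_HLEN (14) + ETH_FCS_LEN (4), so an MTU of
 * 9000 yields a 9018-byte frame, which must not exceed
 * IXGBE_MAX_JUMBO_FRAME_SIZE (or MAXIMUM_ETHERNET_VLAN_SIZE when a
 * pre-X540 part runs in SR-IOV mode).
 */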
5496
5497/**
5498 * ixgbe_open - Called when a network interface is made active
5499 * @netdev: network interface device structure
5500 *
5501 * Returns 0 on success, negative value on failure
5502 *
5503 * The open entry point is called when a network interface is made
5504 * active by the system (IFF_UP). At this point all resources needed
5505 * for transmit and receive operations are allocated, the interrupt
5506 * handler is registered with the OS, the watchdog timer is started,
5507 * and the stack is notified that the interface is ready.
5508 **/
5509static int ixgbe_open(struct net_device *netdev)
5510{
5511 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5512 int err;
4bebfaa5
AK
5513
5514 /* disallow open during test */
5515 if (test_bit(__IXGBE_TESTING, &adapter->state))
5516 return -EBUSY;
9a799d71 5517
54386467
JB
5518 netif_carrier_off(netdev);
5519
9a799d71
AK
5520 /* allocate transmit descriptors */
5521 err = ixgbe_setup_all_tx_resources(adapter);
5522 if (err)
5523 goto err_setup_tx;
5524
9a799d71
AK
5525 /* allocate receive descriptors */
5526 err = ixgbe_setup_all_rx_resources(adapter);
5527 if (err)
5528 goto err_setup_rx;
5529
5530 ixgbe_configure(adapter);
5531
021230d4 5532 err = ixgbe_request_irq(adapter);
9a799d71
AK
5533 if (err)
5534 goto err_req_irq;
5535
9a799d71
AK
5536 err = ixgbe_up_complete(adapter);
5537 if (err)
5538 goto err_up;
5539
d55b53ff
JK
5540 netif_tx_start_all_queues(netdev);
5541
9a799d71
AK
5542 return 0;
5543
5544err_up:
5eba3699 5545 ixgbe_release_hw_control(adapter);
9a799d71
AK
5546 ixgbe_free_irq(adapter);
5547err_req_irq:
9a799d71 5548err_setup_rx:
a20a1199 5549 ixgbe_free_all_rx_resources(adapter);
9a799d71 5550err_setup_tx:
a20a1199 5551 ixgbe_free_all_tx_resources(adapter);
9a799d71
AK
5552 ixgbe_reset(adapter);
5553
5554 return err;
5555}
5556
5557/**
5558 * ixgbe_close - Disables a network interface
5559 * @netdev: network interface device structure
5560 *
5561 * Returns 0, this is not allowed to fail
5562 *
5563 * The close entry point is called when an interface is de-activated
5564 * by the OS. The hardware is still under the driver's control, but
5565 * needs to be disabled. A global MAC reset is issued to stop the
5566 * hardware, and all transmit and receive resources are freed.
5567 **/
5568static int ixgbe_close(struct net_device *netdev)
5569{
5570 struct ixgbe_adapter *adapter = netdev_priv(netdev);
9a799d71
AK
5571
5572 ixgbe_down(adapter);
5573 ixgbe_free_irq(adapter);
5574
e4911d57
AD
5575 ixgbe_fdir_filter_exit(adapter);
5576
9a799d71
AK
5577 ixgbe_free_all_tx_resources(adapter);
5578 ixgbe_free_all_rx_resources(adapter);
5579
5eba3699 5580 ixgbe_release_hw_control(adapter);
9a799d71
AK
5581
5582 return 0;
5583}
5584
b3c8b4ba
AD
5585#ifdef CONFIG_PM
5586static int ixgbe_resume(struct pci_dev *pdev)
5587{
c60fbb00
AD
5588 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5589 struct net_device *netdev = adapter->netdev;
b3c8b4ba
AD
5590 u32 err;
5591
5592 pci_set_power_state(pdev, PCI_D0);
5593 pci_restore_state(pdev);
656ab817
DS
5594 /*
5595 * pci_restore_state() clears dev->state_saved, so call
5596 * pci_save_state() to restore it.
5597 */
5598 pci_save_state(pdev);
9ce77666 5599
5600 err = pci_enable_device_mem(pdev);
b3c8b4ba 5601 if (err) {
849c4542 5602 e_dev_err("Cannot enable PCI device from suspend\n");
b3c8b4ba
AD
5603 return err;
5604 }
5605 pci_set_master(pdev);
5606
dd4d8ca6 5607 pci_wake_from_d3(pdev, false);
b3c8b4ba
AD
5608
5609 err = ixgbe_init_interrupt_scheme(adapter);
5610 if (err) {
849c4542 5611 e_dev_err("Cannot initialize interrupts for device\n");
b3c8b4ba
AD
5612 return err;
5613 }
5614
b3c8b4ba
AD
5615 ixgbe_reset(adapter);
5616
495dce12
WJP
5617 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5618
b3c8b4ba 5619 if (netif_running(netdev)) {
c60fbb00 5620 err = ixgbe_open(netdev);
b3c8b4ba
AD
5621 if (err)
5622 return err;
5623 }
5624
5625 netif_device_attach(netdev);
5626
5627 return 0;
5628}
b3c8b4ba 5629#endif /* CONFIG_PM */
9d8d05ae
RW
5630
5631static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
b3c8b4ba 5632{
c60fbb00
AD
5633 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5634 struct net_device *netdev = adapter->netdev;
e8e26350
PW
5635 struct ixgbe_hw *hw = &adapter->hw;
5636 u32 ctrl, fctrl;
5637 u32 wufc = adapter->wol;
b3c8b4ba
AD
5638#ifdef CONFIG_PM
5639 int retval = 0;
5640#endif
5641
5642 netif_device_detach(netdev);
5643
5644 if (netif_running(netdev)) {
5645 ixgbe_down(adapter);
5646 ixgbe_free_irq(adapter);
5647 ixgbe_free_all_tx_resources(adapter);
5648 ixgbe_free_all_rx_resources(adapter);
5649 }
b3c8b4ba 5650
5f5ae6fc 5651 ixgbe_clear_interrupt_scheme(adapter);
d033d526
JF
5652#ifdef CONFIG_DCB
5653 kfree(adapter->ixgbe_ieee_pfc);
5654 kfree(adapter->ixgbe_ieee_ets);
5655#endif
5f5ae6fc 5656
b3c8b4ba
AD
5657#ifdef CONFIG_PM
5658 retval = pci_save_state(pdev);
5659 if (retval)
5660 return retval;
4df10466 5661
b3c8b4ba 5662#endif
e8e26350
PW
5663 if (wufc) {
5664 ixgbe_set_rx_mode(netdev);
b3c8b4ba 5665
e8e26350
PW
5666 /* turn on all-multi mode if wake on multicast is enabled */
5667 if (wufc & IXGBE_WUFC_MC) {
5668 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5669 fctrl |= IXGBE_FCTRL_MPE;
5670 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
5671 }
5672
5673 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
5674 ctrl |= IXGBE_CTRL_GIO_DIS;
5675 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
5676
5677 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
5678 } else {
5679 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
5680 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5681 }
5682
bd508178
AD
5683 switch (hw->mac.type) {
5684 case ixgbe_mac_82598EB:
dd4d8ca6 5685 pci_wake_from_d3(pdev, false);
bd508178
AD
5686 break;
5687 case ixgbe_mac_82599EB:
b93a2226 5688 case ixgbe_mac_X540:
bd508178
AD
5689 pci_wake_from_d3(pdev, !!wufc);
5690 break;
5691 default:
5692 break;
5693 }
b3c8b4ba 5694
9d8d05ae
RW
5695 *enable_wake = !!wufc;
5696
b3c8b4ba
AD
5697 ixgbe_release_hw_control(adapter);
5698
5699 pci_disable_device(pdev);
5700
9d8d05ae
RW
5701 return 0;
5702}
5703
5704#ifdef CONFIG_PM
5705static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
5706{
5707 int retval;
5708 bool wake;
5709
5710 retval = __ixgbe_shutdown(pdev, &wake);
5711 if (retval)
5712 return retval;
5713
5714 if (wake) {
5715 pci_prepare_to_sleep(pdev);
5716 } else {
5717 pci_wake_from_d3(pdev, false);
5718 pci_set_power_state(pdev, PCI_D3hot);
5719 }
b3c8b4ba
AD
5720
5721 return 0;
5722}
9d8d05ae 5723#endif /* CONFIG_PM */
b3c8b4ba
AD
5724
5725static void ixgbe_shutdown(struct pci_dev *pdev)
5726{
9d8d05ae
RW
5727 bool wake;
5728
5729 __ixgbe_shutdown(pdev, &wake);
5730
5731 if (system_state == SYSTEM_POWER_OFF) {
5732 pci_wake_from_d3(pdev, wake);
5733 pci_set_power_state(pdev, PCI_D3hot);
5734 }
b3c8b4ba
AD
5735}
5736
9a799d71
AK
5737/**
5738 * ixgbe_update_stats - Update the board statistics counters.
5739 * @adapter: board private structure
5740 **/
5741void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5742{
2d86f139 5743 struct net_device *netdev = adapter->netdev;
9a799d71 5744 struct ixgbe_hw *hw = &adapter->hw;
5b7da515 5745 struct ixgbe_hw_stats *hwstats = &adapter->stats;
6f11eef7
AV
5746 u64 total_mpc = 0;
5747 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5b7da515
AD
5748 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5749 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5750 u64 bytes = 0, packets = 0;
9a799d71 5751
d08935c2
DS
5752 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5753 test_bit(__IXGBE_RESETTING, &adapter->state))
5754 return;
5755
94b982b2 5756 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
f8212f97 5757 u64 rsc_count = 0;
94b982b2 5758 u64 rsc_flush = 0;
d51019a4
PW
5759 for (i = 0; i < 16; i++)
5760 adapter->hw_rx_no_dma_resources +=
7ca647bd 5761 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
94b982b2 5762 for (i = 0; i < adapter->num_rx_queues; i++) {
5b7da515
AD
5763 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5764 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
94b982b2
MC
5765 }
5766 adapter->rsc_total_count = rsc_count;
5767 adapter->rsc_total_flush = rsc_flush;
d51019a4
PW
5768 }
5769
5b7da515
AD
5770 for (i = 0; i < adapter->num_rx_queues; i++) {
5771 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5772 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5773 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5774 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5775 bytes += rx_ring->stats.bytes;
5776 packets += rx_ring->stats.packets;
5777 }
5778 adapter->non_eop_descs = non_eop_descs;
5779 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5780 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5781 netdev->stats.rx_bytes = bytes;
5782 netdev->stats.rx_packets = packets;
5783
5784 bytes = 0;
5785 packets = 0;
7ca3bc58 5786 /* gather some per-queue stats into the adapter struct */
5b7da515
AD
5787 for (i = 0; i < adapter->num_tx_queues; i++) {
5788 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5789 restart_queue += tx_ring->tx_stats.restart_queue;
5790 tx_busy += tx_ring->tx_stats.tx_busy;
5791 bytes += tx_ring->stats.bytes;
5792 packets += tx_ring->stats.packets;
5793 }
eb985f09 5794 adapter->restart_queue = restart_queue;
5b7da515
AD
5795 adapter->tx_busy = tx_busy;
5796 netdev->stats.tx_bytes = bytes;
5797 netdev->stats.tx_packets = packets;
7ca3bc58 5798
7ca647bd 5799 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
6f11eef7
AV
5800 for (i = 0; i < 8; i++) {
5801 /* for packet buffers not used, the register should read 0 */
5802 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
5803 missed_rx += mpc;
7ca647bd
JP
5804 hwstats->mpc[i] += mpc;
5805 total_mpc += hwstats->mpc[i];
e8e26350 5806 if (hw->mac.type == ixgbe_mac_82598EB)
7ca647bd
JP
5807 hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
5808 hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
5809 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5810 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5811 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
bd508178
AD
5812 switch (hw->mac.type) {
5813 case ixgbe_mac_82598EB:
7ca647bd
JP
5814 hwstats->pxonrxc[i] +=
5815 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
bd508178
AD
5816 break;
5817 case ixgbe_mac_82599EB:
b93a2226 5818 case ixgbe_mac_X540:
bd508178
AD
5819 hwstats->pxonrxc[i] +=
5820 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
bd508178
AD
5821 break;
5822 default:
5823 break;
e8e26350 5824 }
7ca647bd
JP
5825 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5826 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
6f11eef7 5827 }
7ca647bd 5828 hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
6f11eef7 5829 /* work around hardware counting issue */
7ca647bd 5830 hwstats->gprc -= missed_rx;
6f11eef7 5831
c84d324c
JF
5832 ixgbe_update_xoff_received(adapter);
5833
6f11eef7 5834 /* 82598 hardware only has a 32-bit counter in the high register */
bd508178
AD
5835 switch (hw->mac.type) {
5836 case ixgbe_mac_82598EB:
5837 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
bd508178
AD
5838 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5839 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5840 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5841 break;
b93a2226 5842 case ixgbe_mac_X540:
58f6bcf9
ET
5843 /* OS2BMC stats are X540 only */
5844 hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
5845 hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
5846 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
5847 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
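/* editor's note: X540 intentionally falls through to pick up
 * the counters it shares with 82599EB below */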
5848 case ixgbe_mac_82599EB:
7ca647bd 5849 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
bd508178 5850 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
7ca647bd 5851 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
bd508178 5852 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
7ca647bd 5853 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
bd508178 5854 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
7ca647bd 5855 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
7ca647bd
JP
5856 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5857 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
6d45522c 5858#ifdef IXGBE_FCOE
7ca647bd
JP
5859 hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
5860 hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
5861 hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
5862 hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
5863 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5864 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
6d45522c 5865#endif /* IXGBE_FCOE */
bd508178
AD
5866 break;
5867 default:
5868 break;
e8e26350 5869 }
9a799d71 5870 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
7ca647bd
JP
5871 hwstats->bprc += bprc;
5872 hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
e8e26350 5873 if (hw->mac.type == ixgbe_mac_82598EB)
7ca647bd
JP
5874 hwstats->mprc -= bprc;
5875 hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
5876 hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
5877 hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
5878 hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
5879 hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
5880 hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
5881 hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
5882 hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
6f11eef7 5883 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
7ca647bd 5884 hwstats->lxontxc += lxon;
6f11eef7 5885 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
7ca647bd
JP
5886 hwstats->lxofftxc += lxoff;
5887 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5888 hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
5889 hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
6f11eef7
AV
5890 /*
5891 * 82598 errata - Tx of flow control packets is included in Tx counters
5892 */
5893 xon_off_tot = lxon + lxoff;
7ca647bd
JP
5894 hwstats->gptc -= xon_off_tot;
5895 hwstats->mptc -= xon_off_tot;
5896 hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
5897 hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
5898 hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
5899 hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
5900 hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
5901 hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
5902 hwstats->ptc64 -= xon_off_tot;
5903 hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
5904 hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
5905 hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
5906 hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
5907 hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
5908 hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
9a799d71
AK
5909
5910 /* Fill out the OS statistics structure */
7ca647bd 5911 netdev->stats.multicast = hwstats->mprc;
9a799d71
AK
5912
5913 /* Rx Errors */
7ca647bd 5914 netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
2d86f139 5915 netdev->stats.rx_dropped = 0;
7ca647bd
JP
5916 netdev->stats.rx_length_errors = hwstats->rlec;
5917 netdev->stats.rx_crc_errors = hwstats->crcerrs;
2d86f139 5918 netdev->stats.rx_missed_errors = total_mpc;
9a799d71
AK
5919}
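/*
 * Editor's note: worked example of the 82598 errata adjustment above.
 * A transmitted flow-control frame is a minimum-size frame,
 * ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64 bytes, so for lxon + lxoff pause
 * frames the byte counter (gotc) is reduced by 64 * (lxon + lxoff) and
 * the packet counters (gptc, mptc, ptc64) by lxon + lxoff each.
 */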
5920
5921/**
d034acf1
AD
5922 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
5923 * @adapter - pointer to the device adapter structure
9a799d71 5924 **/
d034acf1 5925static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
9a799d71 5926{
cf8280ee 5927 struct ixgbe_hw *hw = &adapter->hw;
fe49f04a 5928 int i;
cf8280ee 5929
d034acf1
AD
5930 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
5931 return;
5932
5933 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
22d5a71b 5934
d034acf1 5935 /* if interface is down do nothing */
fe49f04a 5936 if (test_bit(__IXGBE_DOWN, &adapter->state))
d034acf1
AD
5937 return;
5938
5939 /* do nothing if we are not using signature filters */
5940 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
5941 return;
5942
5943 adapter->fdir_overflow++;
5944
93c52dd0
AD
5945 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5946 for (i = 0; i < adapter->num_tx_queues; i++)
5947 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
f0f9778d 5948 &(adapter->tx_ring[i]->state));
d034acf1
AD
5949 /* re-enable flow director interrupts */
5950 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
93c52dd0
AD
5951 } else {
5952 e_err(probe, "failed to finish FDIR re-initialization, "
5953 "ignored adding FDIR ATR filters\n");
5954 }
93c52dd0
AD
5955}
5956
5957/**
5958 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
5959 * @adapter - pointer to the device adapter structure
5960 *
5961 * This function serves two purposes. First, it strobes the interrupt lines
5962 * in order to make certain interrupts are occurring. Second, it sets the
5963 * bits needed to check for TX hangs. As a result we should immediately
5964 * determine if a hang has occurred.
5965 */
5966static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
9a799d71 5967{
cf8280ee 5968 struct ixgbe_hw *hw = &adapter->hw;
fe49f04a
AD
5969 u64 eics = 0;
5970 int i;
cf8280ee 5971
93c52dd0
AD
5972 /* If we're down or resetting, just bail */
5973 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5974 test_bit(__IXGBE_RESETTING, &adapter->state))
5975 return;
22d5a71b 5976
93c52dd0
AD
5977 /* Force detection of hung controller */
5978 if (netif_carrier_ok(adapter->netdev)) {
5979 for (i = 0; i < adapter->num_tx_queues; i++)
5980 set_check_for_tx_hang(adapter->tx_ring[i]);
5981 }
22d5a71b 5982
fe49f04a
AD
5983 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
5984 /*
5985 * for legacy and MSI interrupts don't set any bits
5986 * that are enabled for EIAM, because this operation
5987 * would set *both* EIMS and EICS for any bit in EIAM
5988 */
5989 IXGBE_WRITE_REG(hw, IXGBE_EICS,
5990 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
93c52dd0
AD
5991 } else {
5992 /* get one bit for every active tx/rx interrupt vector */
5993 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
5994 struct ixgbe_q_vector *qv = adapter->q_vector[i];
5995 if (qv->rxr_count || qv->txr_count)
5996 eics |= ((u64)1 << i);
5997 }
cf8280ee 5998 }
9a799d71 5999
93c52dd0 6000 /* Cause software interrupt to ensure rings are cleaned */
fe49f04a
AD
6001 ixgbe_irq_rearm_queues(adapter, eics);
6002
cf8280ee
JB
6003}
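/*
 * Editor's note: sketch of the EICS mask built above. With, say,
 * vectors 0, 1 and 3 carrying active rings:
 *
 *	eics = (1ULL << 0) | (1ULL << 1) | (1ULL << 3) = 0xb
 *
 * Writing that mask to EICS fires a software interrupt on exactly those
 * vectors so their rings get cleaned.
 */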
6004
e8e26350 6005/**
93c52dd0
AD
6006 * ixgbe_watchdog_update_link - update the link status
6007 * @adapter - pointer to the device adapter structure
6008 * @link_speed - pointer to a u32 to store the link_speed
e8e26350 6009 **/
93c52dd0 6010static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
e8e26350 6011{
e8e26350 6012 struct ixgbe_hw *hw = &adapter->hw;
93c52dd0
AD
6013 u32 link_speed = adapter->link_speed;
6014 bool link_up = adapter->link_up;
c4cf55e5 6015 int i;
e8e26350 6016
93c52dd0
AD
6017 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
6018 return;
6019
6020 if (hw->mac.ops.check_link) {
6021 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
c4cf55e5 6022 } else {
93c52dd0
AD
6023 /* if there is no check_link function, always assume link is up */
6024 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
6025 link_up = true;
c4cf55e5 6026 }
93c52dd0
AD
6027 if (link_up) {
6028 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6029 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
6030 hw->mac.ops.fc_enable(hw, i);
6031 } else {
6032 hw->mac.ops.fc_enable(hw, 0);
6033 }
6034 }
6035
6036 if (link_up ||
6037 time_after(jiffies, (adapter->link_check_timeout +
6038 IXGBE_TRY_LINK_TIMEOUT))) {
6039 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
6040 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
6041 IXGBE_WRITE_FLUSH(hw);
6042 }
6043
6044 adapter->link_up = link_up;
6045 adapter->link_speed = link_speed;
e8e26350
PW
6046}
6047
6048/**
93c52dd0
AD
6049 * ixgbe_watchdog_link_is_up - update netif_carrier status and
6050 * print link up message
6051 * @adapter - pointer to the device adapter structure
e8e26350 6052 **/
93c52dd0 6053static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
e8e26350 6054{
93c52dd0 6055 struct net_device *netdev = adapter->netdev;
e8e26350 6056 struct ixgbe_hw *hw = &adapter->hw;
93c52dd0
AD
6057 u32 link_speed = adapter->link_speed;
6058 bool flow_rx, flow_tx;
e8e26350 6059
93c52dd0
AD
6060 /* only continue if link was previously down */
6061 if (netif_carrier_ok(netdev))
a985b6c3 6062 return;
63d6e1d8 6063
93c52dd0 6064 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
63d6e1d8 6065
93c52dd0
AD
6066 switch (hw->mac.type) {
6067 case ixgbe_mac_82598EB: {
6068 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
6069 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
6070 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
6071 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
6072 }
6073 break;
6074 case ixgbe_mac_X540:
6075 case ixgbe_mac_82599EB: {
6076 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6077 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6078 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6079 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6080 }
6081 break;
6082 default:
6083 flow_tx = false;
6084 flow_rx = false;
6085 break;
e8e26350 6086 }
93c52dd0
AD
6087 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
6088 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
6089 "10 Gbps" :
6090 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
6091 "1 Gbps" :
6092 (link_speed == IXGBE_LINK_SPEED_100_FULL ?
6093 "100 Mbps" :
6094 "unknown speed"))),
6095 ((flow_rx && flow_tx) ? "RX/TX" :
6096 (flow_rx ? "RX" :
6097 (flow_tx ? "TX" : "None"))));
e8e26350 6098
93c52dd0
AD
6099 netif_carrier_on(netdev);
6100#ifdef HAVE_IPLINK_VF_CONFIG
6101 ixgbe_check_vf_rate_limit(adapter);
6102#endif /* HAVE_IPLINK_VF_CONFIG */
e8e26350
PW
6103}
6104
c4cf55e5 6105/**
93c52dd0
AD
6106 * ixgbe_watchdog_link_is_down - update netif_carrier status and
6107 * print link down message
6108 * @adapter - pointer to the adapter structure
c4cf55e5 6109 **/
93c52dd0 6110static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter* adapter)
c4cf55e5 6111{
cf8280ee 6112 struct net_device *netdev = adapter->netdev;
c4cf55e5 6113 struct ixgbe_hw *hw = &adapter->hw;
10eec955 6114
93c52dd0
AD
6115 adapter->link_up = false;
6116 adapter->link_speed = 0;
cf8280ee 6117
93c52dd0
AD
6118 /* only continue if link was up previously */
6119 if (!netif_carrier_ok(netdev))
6120 return;
264857b8 6121
93c52dd0
AD
6122 /* poll for SFP+ cable when link is down */
6123 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
6124 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
9a799d71 6125
93c52dd0
AD
6126 e_info(drv, "NIC Link is Down\n");
6127 netif_carrier_off(netdev);
6128}
e8e26350 6129
93c52dd0
AD
6130/**
6131 * ixgbe_watchdog_flush_tx - flush queues on link down
6132 * @adapter - pointer to the device adapter structure
6133 **/
6134static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
6135{
c4cf55e5 6136 int i;
93c52dd0 6137 int some_tx_pending = 0;
c4cf55e5 6138
93c52dd0 6139 if (!netif_carrier_ok(adapter->netdev)) {
bc59fcda 6140 for (i = 0; i < adapter->num_tx_queues; i++) {
93c52dd0 6141 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
bc59fcda
NS
6142 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
6143 some_tx_pending = 1;
6144 break;
6145 }
6146 }
6147
6148 if (some_tx_pending) {
6149 /* We've lost link, so the controller stops DMA,
6150 * but we've got queued Tx work that's never going
6151 * to get done, so reset controller to flush Tx.
6152 * (Do the reset outside of interrupt context).
6153 */
c83c6cbd 6154 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
bc59fcda 6155 }
c4cf55e5 6156 }
c4cf55e5
PWJ
6157}
6158
a985b6c3
GR
6159static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
6160{
6161 u32 ssvpc;
6162
6163 /* Do not perform spoof check for 82598 */
6164 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
6165 return;
6166
6167 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
6168
6169 /*
6170 * The SSVPC register is cleared on read; if it reads zero, no
6171 * spoofed packets were seen in the last interval.
6172 */
6173 if (!ssvpc)
6174 return;
6175
6176 e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
6177}
6178
93c52dd0
AD
6179/**
6180 * ixgbe_watchdog_subtask - check and bring link up
6181 * @adapter - pointer to the device adapter structure
6182 **/
6183static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
6184{
6185 /* if interface is down do nothing */
6186 if (test_bit(__IXGBE_DOWN, &adapter->state))
6187 return;
6188
6189 ixgbe_watchdog_update_link(adapter);
6190
6191 if (adapter->link_up)
6192 ixgbe_watchdog_link_is_up(adapter);
6193 else
6194 ixgbe_watchdog_link_is_down(adapter);
bc59fcda 6195
a985b6c3 6196 ixgbe_spoof_check(adapter);
9a799d71 6197 ixgbe_update_stats(adapter);
93c52dd0
AD
6198
6199 ixgbe_watchdog_flush_tx(adapter);
9a799d71 6200}
10eec955 6201
cf8280ee 6202/**
7086400d
AD
6203 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
6204 * @adapter - the ixgbe adapter structure
cf8280ee 6205 **/
7086400d 6206static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
cf8280ee 6207{
cf8280ee 6208 struct ixgbe_hw *hw = &adapter->hw;
7086400d 6209 s32 err;
cf8280ee 6210
7086400d
AD
6211 /* not searching for SFP so there is nothing to do here */
6212 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
6213 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6214 return;
10eec955 6215
7086400d
AD
6216 /* someone else is in init, wait until next service event */
6217 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6218 return;
cf8280ee 6219
7086400d
AD
6220 err = hw->phy.ops.identify_sfp(hw);
6221 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6222 goto sfp_out;
264857b8 6223
7086400d
AD
6224 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
6225 /* If no cable is present, then we need to reset
6226 * the next time we find a good cable. */
6227 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
cf8280ee 6228 }
9a799d71 6229
7086400d
AD
6230 /* exit on error */
6231 if (err)
6232 goto sfp_out;
e8e26350 6233
7086400d
AD
6234 /* exit if reset not needed */
6235 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
6236 goto sfp_out;
9a799d71 6237
7086400d 6238 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
bc59fcda 6239
7086400d
AD
6240 /*
6241 * A module may be identified correctly, but the EEPROM may not have
6242 * support for that module. setup_sfp() will fail in that case, so
6243 * we should not allow that module to load.
6244 */
6245 if (hw->mac.type == ixgbe_mac_82598EB)
6246 err = hw->phy.ops.reset(hw);
6247 else
6248 err = hw->mac.ops.setup_sfp(hw);
6249
6250 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
6251 goto sfp_out;
6252
6253 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
6254 e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
6255
6256sfp_out:
6257 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6258
6259 if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
6260 (adapter->netdev->reg_state == NETREG_REGISTERED)) {
6261 e_dev_err("failed to initialize because an unsupported "
6262 "SFP+ module type was detected.\n");
6263 e_dev_err("Reload the driver after installing a "
6264 "supported module.\n");
6265 unregister_netdev(adapter->netdev);
bc59fcda 6266 }
7086400d 6267}
bc59fcda 6268
7086400d
AD
6269/**
6270 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
6271 * @adapter - the ixgbe adapter structure
6272 **/
6273static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
6274{
6275 struct ixgbe_hw *hw = &adapter->hw;
6276 u32 autoneg;
6277 bool negotiation;
6278
6279 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
6280 return;
6281
6282 /* someone else is in init, wait until next service event */
6283 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
6284 return;
6285
6286 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
6287
6288 autoneg = hw->phy.autoneg_advertised;
6289 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
6290 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
6291 hw->mac.autotry_restart = false;
6292 if (hw->mac.ops.setup_link)
6293 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
6294
6295 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
6296 adapter->link_check_timeout = jiffies;
6297 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
6298}
6299
6300/**
6301 * ixgbe_service_timer - Timer Call-back
6302 * @data: pointer to adapter cast into an unsigned long
6303 **/
6304static void ixgbe_service_timer(unsigned long data)
6305{
6306 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
6307 unsigned long next_event_offset;
6308
6309 /* poll faster when waiting for link */
6310 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
6311 next_event_offset = HZ / 10;
6312 else
6313 next_event_offset = HZ * 2;
6314
6315 /* Reset the timer */
6316 mod_timer(&adapter->service_timer, next_event_offset + jiffies);
6317
6318 ixgbe_service_event_schedule(adapter);
6319}
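/*
 * Editor's note: with the usual HZ the cadence above works out to one
 * service event every 100 ms (HZ / 10) while a link update is pending
 * and every 2 s (HZ * 2) otherwise; mod_timer() re-arms the one-shot
 * timer relative to the current jiffies.
 */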
6320
c83c6cbd
AD
6321static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
6322{
6323 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
6324 return;
6325
6326 adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
6327
6328 /* If we're already down or resetting, just bail */
6329 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
6330 test_bit(__IXGBE_RESETTING, &adapter->state))
6331 return;
6332
6333 ixgbe_dump(adapter);
6334 netdev_err(adapter->netdev, "Reset adapter\n");
6335 adapter->tx_timeout_count++;
6336
6337 ixgbe_reinit_locked(adapter);
6338}
6339
7086400d
AD
6340/**
6341 * ixgbe_service_task - manages and runs subtasks
6342 * @work: pointer to work_struct containing our data
6343 **/
6344static void ixgbe_service_task(struct work_struct *work)
6345{
6346 struct ixgbe_adapter *adapter = container_of(work,
6347 struct ixgbe_adapter,
6348 service_task);
6349
c83c6cbd 6350 ixgbe_reset_subtask(adapter);
7086400d
AD
6351 ixgbe_sfp_detection_subtask(adapter);
6352 ixgbe_sfp_link_config_subtask(adapter);
f0f9778d 6353 ixgbe_check_overtemp_subtask(adapter);
93c52dd0 6354 ixgbe_watchdog_subtask(adapter);
d034acf1 6355 ixgbe_fdir_reinit_subtask(adapter);
93c52dd0 6356 ixgbe_check_hang_subtask(adapter);
7086400d
AD
6357
6358 ixgbe_service_event_complete(adapter);
9a799d71
AK
6359}
6360
9a799d71 6361static int ixgbe_tso(struct ixgbe_adapter *adapter,
e8e9f696 6362 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
5e09a105 6363 u32 tx_flags, u8 *hdr_len, __be16 protocol)
9a799d71
AK
6364{
6365 struct ixgbe_adv_tx_context_desc *context_desc;
6366 unsigned int i;
6367 int err;
6368 struct ixgbe_tx_buffer *tx_buffer_info;
9f8cdf4f
JB
6369 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
6370 u32 mss_l4len_idx, l4len;
9a799d71
AK
6371
6372 if (skb_is_gso(skb)) {
6373 if (skb_header_cloned(skb)) {
6374 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
6375 if (err)
6376 return err;
6377 }
6378 l4len = tcp_hdrlen(skb);
6379 *hdr_len += l4len;
6380
5e09a105 6381 if (protocol == htons(ETH_P_IP)) {
9a799d71
AK
6382 struct iphdr *iph = ip_hdr(skb);
6383 iph->tot_len = 0;
6384 iph->check = 0;
6385 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
e8e9f696
JP
6386 iph->daddr, 0,
6387 IPPROTO_TCP,
6388 0);
8e1e8a47 6389 } else if (skb_is_gso_v6(skb)) {
9a799d71
AK
6390 ipv6_hdr(skb)->payload_len = 0;
6391 tcp_hdr(skb)->check =
6392 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
e8e9f696
JP
6393 &ipv6_hdr(skb)->daddr,
6394 0, IPPROTO_TCP, 0);
9a799d71
AK
6395 }
6396
6397 i = tx_ring->next_to_use;
6398
6399 tx_buffer_info = &tx_ring->tx_buffer_info[i];
31f05a2d 6400 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
9a799d71
AK
6401
6402 /* VLAN MACLEN IPLEN */
6403 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
6404 vlan_macip_lens |=
6405 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
6406 vlan_macip_lens |= ((skb_network_offset(skb)) <<
e8e9f696 6407 IXGBE_ADVTXD_MACLEN_SHIFT);
9a799d71
AK
6408 *hdr_len += skb_network_offset(skb);
6409 vlan_macip_lens |=
6410 (skb_transport_header(skb) - skb_network_header(skb));
6411 *hdr_len +=
6412 (skb_transport_header(skb) - skb_network_header(skb));
6413 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
6414 context_desc->seqnum_seed = 0;
6415
6416 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
9f8cdf4f 6417 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
e8e9f696 6418 IXGBE_ADVTXD_DTYP_CTXT);
9a799d71 6419
5e09a105 6420 if (protocol == htons(ETH_P_IP))
9a799d71
AK
6421 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
6422 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
6423 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
6424
6425 /* MSS L4LEN IDX */
9f8cdf4f 6426 mss_l4len_idx =
9a799d71
AK
6427 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
6428 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
4eeae6fd
PW
6429 /* use index 1 for TSO */
6430 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
9a799d71
AK
6431 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
6432
6433 tx_buffer_info->time_stamp = jiffies;
6434 tx_buffer_info->next_to_watch = i;
6435
6436 i++;
6437 if (i == tx_ring->count)
6438 i = 0;
6439 tx_ring->next_to_use = i;
6440
6441 return true;
6442 }
6443 return false;
6444}
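/*
 * Editor's note: the csum_tcpudp_magic()/csum_ipv6_magic() calls above
 * seed the TCP checksum with the pseudo-header sum computed over a zero
 * length. That is deliberate: when the hardware slices the TSO
 * super-frame it fills in the per-segment length contribution and the
 * final checksum for each generated segment.
 */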
6445
5e09a105
HZ
6446static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6447 __be16 protocol)
7ca647bd
JP
6448{
6449 u32 rtn = 0;
7ca647bd
JP
6450
6451 switch (protocol) {
6452 case cpu_to_be16(ETH_P_IP):
6453 rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
6454 switch (ip_hdr(skb)->protocol) {
6455 case IPPROTO_TCP:
6456 rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
6457 break;
6458 case IPPROTO_SCTP:
6459 rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
6460 break;
6461 }
6462 break;
6463 case cpu_to_be16(ETH_P_IPV6):
6464 /* XXX what about other V6 headers?? */
6465 switch (ipv6_hdr(skb)->nexthdr) {
6466 case IPPROTO_TCP:
6467 rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
6468 break;
6469 case IPPROTO_SCTP:
6470 rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
6471 break;
6472 }
6473 break;
6474 default:
6475 if (unlikely(net_ratelimit()))
6476 e_warn(probe, "partial checksum but proto=%x!\n",
5e09a105 6477 protocol);
7ca647bd
JP
6478 break;
6479 }
6480
6481 return rtn;
6482}
6483
9a799d71 6484static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
e8e9f696 6485 struct ixgbe_ring *tx_ring,
5e09a105
HZ
6486 struct sk_buff *skb, u32 tx_flags,
6487 __be16 protocol)
9a799d71
AK
6488{
6489 struct ixgbe_adv_tx_context_desc *context_desc;
6490 unsigned int i;
6491 struct ixgbe_tx_buffer *tx_buffer_info;
6492 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
6493
6494 if (skb->ip_summed == CHECKSUM_PARTIAL ||
6495 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
6496 i = tx_ring->next_to_use;
6497 tx_buffer_info = &tx_ring->tx_buffer_info[i];
31f05a2d 6498 context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
9a799d71
AK
6499
6500 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
6501 vlan_macip_lens |=
6502 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
6503 vlan_macip_lens |= (skb_network_offset(skb) <<
e8e9f696 6504 IXGBE_ADVTXD_MACLEN_SHIFT);
9a799d71
AK
6505 if (skb->ip_summed == CHECKSUM_PARTIAL)
6506 vlan_macip_lens |= (skb_transport_header(skb) -
e8e9f696 6507 skb_network_header(skb));
9a799d71
AK
6508
6509 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
6510 context_desc->seqnum_seed = 0;
6511
6512 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
e8e9f696 6513 IXGBE_ADVTXD_DTYP_CTXT);
9a799d71 6514
7ca647bd 6515 if (skb->ip_summed == CHECKSUM_PARTIAL)
5e09a105 6516 type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
9a799d71
AK
6517
6518 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
4eeae6fd 6519 /* use index zero for tx checksum offload */
9a799d71
AK
6520 context_desc->mss_l4len_idx = 0;
6521
6522 tx_buffer_info->time_stamp = jiffies;
6523 tx_buffer_info->next_to_watch = i;
9f8cdf4f 6524
9a799d71
AK
6525 i++;
6526 if (i == tx_ring->count)
6527 i = 0;
6528 tx_ring->next_to_use = i;
6529
6530 return true;
6531 }
9f8cdf4f 6532
9a799d71
AK
6533 return false;
6534}
6535
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
			struct ixgbe_ring *tx_ring,
			struct sk_buff *skb, u32 tx_flags,
			unsigned int first, const u8 hdr_len)
{
	struct device *dev = tx_ring->dev;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	unsigned int bytecount = skb->len;
	u16 gso_segs = 1;

	i = tx_ring->next_to_use;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
		/* excluding fcoe_crc_eof for FCoE */
		total -= sizeof(struct fcoe_crc_eof);

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = frag->page_offset;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = dma_map_page(dev,
							   frag->page,
							   offset, size,
							   DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(dev, tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
		}
		if (total == 0)
			break;
	}

	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		gso_segs = skb_shinfo(skb)->gso_segs;
#ifdef IXGBE_FCOE
	/* adjust for FCoE Sequence Offload */
	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
					skb_shinfo(skb)->gso_size);
#endif /* IXGBE_FCOE */
	bytecount += (gso_segs - 1) * hdr_len;

	/* multiply data chunks by size of headers */
	tx_ring->tx_buffer_info[i].bytecount = bytecount;
	tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	e_dev_err("TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->time_stamp = 0;
	tx_buffer_info->next_to_watch = 0;
	if (count)
		count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	return 0;
}

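/*
 * ixgbe_tx_queue - write the data descriptors and notify hardware
 *
 * Translates tx_flags into descriptor command/offload bits, fills one
 * advanced data descriptor per mapped buffer, marks the last descriptor
 * EOP/RS, and bumps the ring tail register after a write barrier.
 */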
static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
					 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		olinfo_status |= IXGBE_ADVTXD_CC;
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_FSO)
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	}

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
}

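/*
 * ixgbe_atr - sample a transmitted TCP flow for Application Targeted Routing
 *
 * On SYN packets, or once every atr_sample_rate packets, a flow director
 * signature filter is added so that receives for this flow are steered
 * to the queue (and hence CPU) that transmitted it.
 */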
static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
		      u32 tx_flags, __be16 protocol)
{
	struct ixgbe_q_vector *q_vector = ring->q_vector;
	union ixgbe_atr_hash_dword input = { .dword = 0 };
	union ixgbe_atr_hash_dword common = { .dword = 0 };
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	__be16 vlan_id;

	/* if ring doesn't have an interrupt vector, cannot perform ATR */
	if (!q_vector)
		return;

	/* do nothing if sampling is disabled */
	if (!ring->atr_sample_rate)
		return;

	ring->atr_count++;

	/* snag network header to get L4 type and address */
	hdr.network = skb_network_header(skb);

	/* Currently only IPv4/IPv6 with TCP is supported */
	if ((protocol != __constant_htons(ETH_P_IPV6) ||
	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
	    (protocol != __constant_htons(ETH_P_IP) ||
	     hdr.ipv4->protocol != IPPROTO_TCP))
		return;

	th = tcp_hdr(skb);

	/* skip this packet since the socket is closing */
	if (th->fin)
		return;

	/* sample on all syn packets or once every atr sample count */
	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
		return;

	/* reset sample count */
	ring->atr_count = 0;

	vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);

	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * is XORed together and stored in the compressed dword.
	 */
	input.formatted.vlan_id = vlan_id;

	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
	if (vlan_id)
		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
	else
		common.port.src ^= th->dest ^ protocol;
	common.port.dst ^= th->source;

	if (protocol == __constant_htons(ETH_P_IP)) {
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
	} else {
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
			     hdr.ipv6->saddr.s6_addr32[1] ^
			     hdr.ipv6->saddr.s6_addr32[2] ^
			     hdr.ipv6->saddr.s6_addr32[3] ^
			     hdr.ipv6->daddr.s6_addr32[0] ^
			     hdr.ipv6->daddr.s6_addr32[1] ^
			     hdr.ipv6->daddr.s6_addr32[2] ^
			     hdr.ipv6->daddr.s6_addr32[3];
	}

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
					      input, common, ring->queue_index);
}

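/*
 * Stop the subqueue when fewer than 'size' descriptors are free, then
 * re-check under the memory barrier in case the cleanup path freed
 * descriptors concurrently; restart the queue immediately if it did.
 */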
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(tx_ring, size);
}

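/*
 * ixgbe_select_queue - map an skb to a Tx queue
 *
 * FCoE/FIP frames are pinned to the FCoE queue range; with flow director
 * hashing enabled the current CPU picks the queue so that ixgbe_atr()
 * can bind the flow to it; otherwise fall back to skb_tx_hash().
 */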
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int txq = smp_processor_id();
#ifdef IXGBE_FCOE
	__be16 protocol;

	protocol = vlan_get_protocol(skb);

	if (((protocol == htons(ETH_P_FCOE)) ||
	     (protocol == htons(ETH_P_FIP))) &&
	    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
		txq += adapter->ring_feature[RING_F_FCOE].mask;
		return txq;
	}
#endif

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;
		return txq;
	}

	return skb_tx_hash(dev, skb);
}

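/*
 * Main transmit path: builds VLAN/DCB tx_flags, reserves enough
 * descriptors (or returns NETDEV_TX_BUSY), sets up TSO/FSO/checksum
 * context descriptors, maps the buffers, and posts them to hardware.
 */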
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring)
{
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int tso;
	int count = 0;
	unsigned int f;
	__be16 protocol;

	protocol = vlan_get_protocol(skb);

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
			tx_flags |= tx_ring->dcb_tc << 13;
		}
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
		   skb->priority != TC_PRIO_CONTROL) {
		tx_flags |= tx_ring->dcb_tc << 13;
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

#ifdef IXGBE_FCOE
	/* for FCoE with DCB, we force the priority to what
	 * was specified by the switch */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
	    (protocol == htons(ETH_P_FCOE)))
		tx_flags |= IXGBE_TX_FLAGS_FCOE;
#endif

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
		/* setup tx offload for FCoE */
		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
	} else {
		if (protocol == htons(ETH_P_IP))
			tx_flags |= IXGBE_TX_FLAGS_IPV4;
		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
				protocol);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_TSO;
		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
				       protocol) &&
			 (skb->ip_summed == CHECKSUM_PARTIAL))
			tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
	if (count) {
		/* add the ATR filter if ATR is on */
		if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
			ixgbe_atr(tx_ring, skb, tx_flags, protocol);
		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

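/* Thin ndo_start_xmit wrapper that resolves the ring from queue_mapping. */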
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;

	tx_ring = adapter->tx_ring[skb->queue_mapping];
	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
			    IXGBE_RAH_AV);

	return 0;
}

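/*
 * MDIO glue for the mdio45/ethtool infrastructure: forward clause-45
 * register reads and writes to the PHY ops for the port address probed
 * at init time, rejecting any other prtad.
 */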
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}

/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_q_vectors; i++) {
			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
			ixgbe_msix_clean_many(0, q_vector);
		}
	} else {
		ixgbe_intr(adapter->pdev->irq, netdev);
	}
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}
#endif

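/*
 * Per-ring byte/packet counters are read under u64_stats_fetch_begin_bh/
 * retry so 64-bit values stay consistent on 32-bit hosts; rcu_read_lock()
 * protects against rings being freed during a concurrent reconfigure.
 */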
static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_bh(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
			stats->rx_packets += packets;
			stats->rx_bytes += bytes;
		}
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_bh(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
			stats->tx_packets += packets;
			stats->tx_bytes += bytes;
		}
	}
	rcu_read_unlock();
	/* following stats updated by ixgbe_watchdog_task() */
	stats->multicast = netdev->stats.multicast;
	stats->rx_errors = netdev->stats.rx_errors;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
	return stats;
}

/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 * @adapter: pointer to ixgbe_adapter
 * @tc: number of traffic classes currently enabled
 *
 * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
 * each 802.1Q priority maps to a packet buffer that exists.
 */
static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg, rsave;
	int i;

	/* The 82598 has a static priority to TC mapping that cannot
	 * be changed, so no validation is needed.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	rsave = reg;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		u8 up2tc = (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)) & 0x7;

		/* If up2tc is out of bounds default to zero */
		if (up2tc > tc)
			reg &= ~(0x7 << (i * IXGBE_RTRUP2TC_UP_SHIFT));
	}

	if (reg != rsave)
		IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	return;
}


/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
 * classes.
 *
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* If DCB is enabled do not remove traffic classes, multiple
	 * traffic classes are required to implement DCB
	 */
	if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return 0;

	/* Hardware supports up to 8 traffic classes */
	if (tc > MAX_TRAFFIC_CLASS ||
	    (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
		return -EINVAL;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbe_close(dev);
	ixgbe_clear_interrupt_scheme(adapter);

	if (tc)
		netdev_set_num_tc(dev, tc);
	else
		netdev_reset_tc(dev);

	ixgbe_init_interrupt_scheme(adapter);
	ixgbe_validate_rtr(adapter, tc);
	if (netif_running(dev))
		ixgbe_open(dev);

	return 0;
}

static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_set_multicast_list	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate	= ixgbe_ndo_set_vf_bw,
	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	.ndo_get_stats64	= ixgbe_get_stats64,
	.ndo_setup_tc		= ixgbe_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
#endif /* IXGBE_FCOE */
};

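/*
 * ixgbe_probe_vf - enable SR-IOV at probe time when the max_vfs module
 * parameter is set
 *
 * Allocates the per-VF state and the VF macvlan list, initializes the
 * PF mailbox, and disables RSC (which is incompatible with SR-IOV mode).
 * On any failure SR-IOV is left disabled and num_vfs is cleared.
 */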
static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
				     const struct ixgbe_info *ii)
{
#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	int err;
	int num_vf_macvlans, i;
	struct vf_macvlans *mv_list;

	if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
		return;

	/* The 82599 supports up to 64 VFs per physical function
	 * but this implementation limits allocation to 63 so that
	 * basic networking resources are still available to the
	 * physical function
	 */
	adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
	adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
	if (err) {
		e_err(probe, "Failed to enable PCI sriov: %d\n", err);
		goto err_novfs;
	}

	num_vf_macvlans = hw->mac.num_rar_entries -
			  (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);

	adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
					     sizeof(struct vf_macvlans),
					     GFP_KERNEL);
	if (mv_list) {
		/* Initialize list of VF macvlans */
		INIT_LIST_HEAD(&adapter->vf_mvs.l);
		for (i = 0; i < num_vf_macvlans; i++) {
			mv_list->vf = -1;
			mv_list->free = true;
			mv_list->rar_entry = hw->mac.num_rar_entries -
					     (i + adapter->num_vfs + 1);
			list_add(&mv_list->l, &adapter->vf_mvs.l);
			mv_list++;
		}
	}

	/* If call to enable VFs succeeded then allocate memory
	 * for per VF control structures.
	 */
	adapter->vfinfo =
		kcalloc(adapter->num_vfs,
			sizeof(struct vf_data_storage), GFP_KERNEL);
	if (adapter->vfinfo) {
		/* Now that we're sure SR-IOV is enabled
		 * and memory allocated set up the mailbox parameters
		 */
		ixgbe_init_mbx_params_pf(hw);
		memcpy(&hw->mbx.ops, ii->mbx_ops,
		       sizeof(hw->mbx.ops));

		/* Disable RSC when in SR-IOV mode */
		adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
				     IXGBE_FLAG2_RSC_ENABLED);
		return;
	}

	/* Oh oh */
	e_err(probe, "Unable to allocate memory for VF Data Storage - "
	      "SRIOV disabled\n");
	pci_disable_sriov(adapter->pdev);

err_novfs:
	adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
	adapter->num_vfs = 0;
#endif /* CONFIG_PCI_IOV */
}

/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
	u8 part_str[IXGBE_PBANUM_LENGTH];
	unsigned int indices = num_possible_cpus();
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev,
					"No usable DMA configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

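	/*
	 * Compute the number of Tx queues to register: one per possible
	 * CPU, scaled by traffic classes when DCB is built in, clamped
	 * per MAC family (RSS vs. flow director limits), plus headroom
	 * reserved for FCoE where enabled.
	 */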
#ifdef CONFIG_IXGBE_DCB
	indices *= MAX_TRAFFIC_CLASS;
#endif

	if (ii->mac == ixgbe_mac_82598EB)
		indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
	else
		indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);

#ifdef IXGBE_FCOE
	indices += min_t(unsigned int, num_possible_cpus(),
			 IXGBE_MAX_FCOE_INDICES);
#endif
	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

7490
e86bff0e 7491 /* Make it possible the adapter to be woken up via WOL */
b93a2226
DS
7492 switch (adapter->hw.mac.type) {
7493 case ixgbe_mac_82599EB:
7494 case ixgbe_mac_X540:
e86bff0e 7495 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
b93a2226
DS
7496 break;
7497 default:
7498 break;
7499 }
e86bff0e 7500
bf069c97
DS
7501 /*
7502 * If there is a fan on this device and it has failed log the
7503 * failure.
7504 */
7505 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
7506 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
7507 if (esdp & IXGBE_ESDP_SDP1)
396e799c 7508 e_crit(probe, "Fan has stopped, replace the adapter\n");
bf069c97
DS
7509 }
7510
c44ade9e 7511 /* reset_hw fills in the perm_addr as well */
119fc60a 7512 hw->phy.reset_if_overtemp = true;
c44ade9e 7513 err = hw->mac.ops.reset_hw(hw);
119fc60a 7514 hw->phy.reset_if_overtemp = false;
8ca783ab
DS
7515 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
7516 hw->mac.type == ixgbe_mac_82598EB) {
8ca783ab
DS
7517 err = 0;
7518 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
7086400d 7519 e_dev_err("failed to load because an unsupported SFP+ "
849c4542
ET
7520 "module type was detected.\n");
7521 e_dev_err("Reload the driver after installing a supported "
7522 "module.\n");
04f165ef
PW
7523 goto err_sw_init;
7524 } else if (err) {
849c4542 7525 e_dev_err("HW Init failed: %d\n", err);
c44ade9e
JB
7526 goto err_sw_init;
7527 }
7528
1cdd1ec8
GR
7529 ixgbe_probe_vf(adapter, ii);
7530
396e799c 7531 netdev->features = NETIF_F_SG |
e8e9f696
JP
7532 NETIF_F_IP_CSUM |
7533 NETIF_F_HW_VLAN_TX |
7534 NETIF_F_HW_VLAN_RX |
7535 NETIF_F_HW_VLAN_FILTER;
9a799d71 7536
e9990a9c 7537 netdev->features |= NETIF_F_IPV6_CSUM;
9a799d71 7538 netdev->features |= NETIF_F_TSO;
9a799d71 7539 netdev->features |= NETIF_F_TSO6;
78b6f4ce 7540 netdev->features |= NETIF_F_GRO;
67a74ee2 7541 netdev->features |= NETIF_F_RXHASH;
ad31c402 7542
58be7666
DS
7543 switch (adapter->hw.mac.type) {
7544 case ixgbe_mac_82599EB:
7545 case ixgbe_mac_X540:
45a5ead0 7546 netdev->features |= NETIF_F_SCTP_CSUM;
58be7666
DS
7547 break;
7548 default:
7549 break;
7550 }
45a5ead0 7551
ad31c402
JK
7552 netdev->vlan_features |= NETIF_F_TSO;
7553 netdev->vlan_features |= NETIF_F_TSO6;
22f32b7a 7554 netdev->vlan_features |= NETIF_F_IP_CSUM;
cd1da503 7555 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
ad31c402
JK
7556 netdev->vlan_features |= NETIF_F_SG;
7557
1cdd1ec8
GR
7558 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7559 adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
7560 IXGBE_FLAG_DCB_ENABLED);
2f90b865 7561
7a6b6f51 7562#ifdef CONFIG_IXGBE_DCB
2f90b865
AD
7563 netdev->dcbnl_ops = &dcbnl_ops;
7564#endif
7565
eacd73f7 7566#ifdef IXGBE_FCOE
0d551589 7567 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
eacd73f7
YZ
7568 if (hw->mac.ops.get_device_caps) {
7569 hw->mac.ops.get_device_caps(hw, &device_caps);
0d551589
YZ
7570 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
7571 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
eacd73f7
YZ
7572 }
7573 }
5e09d7f6
YZ
7574 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
7575 netdev->vlan_features |= NETIF_F_FCOE_CRC;
7576 netdev->vlan_features |= NETIF_F_FSO;
7577 netdev->vlan_features |= NETIF_F_FCOE_MTU;
7578 }
eacd73f7 7579#endif /* IXGBE_FCOE */
7b872a55 7580 if (pci_using_dac) {
9a799d71 7581 netdev->features |= NETIF_F_HIGHDMA;
7b872a55
YZ
7582 netdev->vlan_features |= NETIF_F_HIGHDMA;
7583 }
9a799d71 7584
0c19d6af 7585 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
f8212f97
AD
7586 netdev->features |= NETIF_F_LRO;
7587
9a799d71 7588 /* make sure the EEPROM is good */
c44ade9e 7589 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
849c4542 7590 e_dev_err("The EEPROM Checksum Is Not Valid\n");
9a799d71
AK
7591 err = -EIO;
7592 goto err_eeprom;
7593 }
7594
7595 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
7596 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
7597
c44ade9e 7598 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
849c4542 7599 e_dev_err("invalid MAC address\n");
9a799d71
AK
7600 err = -EIO;
7601 goto err_eeprom;
7602 }
7603
	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
	      (hw->mac.type == ixgbe_mac_82599EB))))
		hw->mac.ops.disable_tx_laser(hw);

	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
		    (unsigned long) adapter);

	INIT_WORK(&adapter->service_task, ixgbe_service_task);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		netdev->features &= ~NETIF_F_RXHASH;

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only this subdevice supports WOL */
		if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
			adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
					IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
		if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
			adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
					IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		break;
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
				IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		break;
	default:
		adapter->wol = 0;
		break;
	}
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	e_dev_info("(PCI Express:%s:%s) %pM\n",
		   (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
		    "Unknown"),
		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
		    "Unknown"),
		   netdev->dev_addr);

	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
	if (err)
		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   part_str);
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, part_str);

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		e_dev_warn("PCI-Express bandwidth available for this card is "
			   "not sufficient for optimal performance.\n");
		e_dev_warn("For optimal performance a x8 PCI-Express slot "
			   "is required.\n");
	}

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);

	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* Inform firmware of driver version */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD, KFIX);

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	set_bit(__IXGBE_DOWN, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);

#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		ixgbe_disable_sriov(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	e_dev_info("complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			e_info(probe, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */