/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 4
#define MIN 0
#define BUILD 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
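/* With the values above, DRV_VERSION expands to the string "4.0.1-k". */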
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

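/* Note: assuming the usual netif_msg_init() handling of out-of-range values,
 * a negative "debug" value such as the -1 default selects DEFAULT_MSG_ENABLE
 * above, i.e. driver, probe and link messages only.
 */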
struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15     0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng  ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->skb) {
					print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS,
						  16, 1, buffer_info->skb->data,
						  IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
					  DUMP_PREFIX_ADDRESS,
					  16, 1,
					  page_address(buffer_info->page) +
						  buffer_info->page_offset,
					  PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
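		/* Illustration of the Q_IDX_82576() interleave above:
		 * i = 0, 1, 2, 3, ... maps to register index 0, 8, 1, 9, ...
		 * so with e.g. 2 VFs (rbase_offset = 2) the PF's RSS queues
		 * land on indices 2, 10, 3, 11, ... leaving 0/8 and 1/9 free
		 * for VF 0 and VF 1.
		 */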
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/*
		 * On i350, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
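		/* Worked example of the mapping below: reg_idx 10 gives
		 * row 10 & 0x7 = 2 and column offset (10 & 0x8) << 1 = 16,
		 * so the Rx cause lands in bits 23:16 of IVAR0[2].
		 */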
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
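	/* e.g. with rss_queues = 4 and IGB_FLAG_QUEUE_PAIRS set this gives
	 * 4 queue vectors + 1 link vector = 5 MSI-X entries; with pairing
	 * disabled it would be 4 + 4 + 1 = 9.
	 */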
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	rtnl_lock();
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	err = netif_set_real_num_rx_queues(adapter->netdev,
					   adapter->num_rx_queues);
	rtnl_unlock();
	return err;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector),
				   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}

	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
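/* For example, 4 Rx and 4 Tx queues with 8 or more q_vectors get one vector
 * per ring; with only 4 q_vectors (queue pairing) Rx ring i and Tx ring i
 * share vector i, which is the typical layout when IGB_FLAG_QUEUE_PAIRS is
 * set.
 */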
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}


	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	igb_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_disable(&(adapter->q_vector[i]->napi));

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

1583void igb_reset(struct igb_adapter *adapter)
1584{
090b1795 1585 struct pci_dev *pdev = adapter->pdev;
9d5c8243 1586 struct e1000_hw *hw = &adapter->hw;
2d064c06
AD
1587 struct e1000_mac_info *mac = &hw->mac;
1588 struct e1000_fc_info *fc = &hw->fc;
9d5c8243
AK
1589 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1590 u16 hwm;
1591
1592 /* Repartition Pba for greater than 9k mtu
1593 * To take effect CTRL.RST is required.
1594 */
fa4dfae0 1595 switch (mac->type) {
d2ba2ed8 1596 case e1000_i350:
55cac248
AD
1597 case e1000_82580:
1598 pba = rd32(E1000_RXPBS);
1599 pba = igb_rxpbs_adjust_82580(pba);
1600 break;
fa4dfae0 1601 case e1000_82576:
d249be54
AD
1602 pba = rd32(E1000_RXPBS);
1603 pba &= E1000_RXPBS_SIZE_MASK_82576;
fa4dfae0
AD
1604 break;
1605 case e1000_82575:
f96a8a0b
CW
1606 case e1000_i210:
1607 case e1000_i211:
fa4dfae0
AD
1608 default:
1609 pba = E1000_PBA_34K;
1610 break;
2d064c06 1611 }
9d5c8243 1612
2d064c06
AD
1613 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1614 (mac->type < e1000_82576)) {
9d5c8243
AK
1615 /* adjust PBA for jumbo frames */
1616 wr32(E1000_PBA, pba);
1617
1618 /* To maintain wire speed transmits, the Tx FIFO should be
1619 * large enough to accommodate two full transmit packets,
1620 * rounded up to the next 1KB and expressed in KB. Likewise,
1621 * the Rx FIFO should be large enough to accommodate at least
1622 * one full receive packet and is similarly rounded up and
1623 * expressed in KB. */
1624 pba = rd32(E1000_PBA);
1625 /* upper 16 bits has Tx packet buffer allocation size in KB */
1626 tx_space = pba >> 16;
1627 /* lower 16 bits has Rx packet buffer allocation size in KB */
1628 pba &= 0xffff;
1629 /* the Tx FIFO also stores 16 bytes of information about the Tx packet,
1630 * but don't include the Ethernet FCS because hardware appends it */
1631 min_tx_space = (adapter->max_frame_size +
85e8d004 1632 sizeof(union e1000_adv_tx_desc) -
9d5c8243
AK
1633 ETH_FCS_LEN) * 2;
1634 min_tx_space = ALIGN(min_tx_space, 1024);
1635 min_tx_space >>= 10;
1636 /* software strips receive CRC, so leave room for it */
1637 min_rx_space = adapter->max_frame_size;
1638 min_rx_space = ALIGN(min_rx_space, 1024);
1639 min_rx_space >>= 10;
1640
1641 /* If current Tx allocation is less than the min Tx FIFO size,
1642 * and the min Tx FIFO size is less than the current Rx FIFO
1643 * allocation, take space away from current Rx allocation */
1644 if (tx_space < min_tx_space &&
1645 ((min_tx_space - tx_space) < pba)) {
1646 pba = pba - (min_tx_space - tx_space);
1647
1648 /* if short on rx space, rx wins and must trump tx
1649 * adjustment */
1650 if (pba < min_rx_space)
1651 pba = min_rx_space;
1652 }
2d064c06 1653 wr32(E1000_PBA, pba);
9d5c8243 1654 }
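	/* Worked example (illustration only, not driver code): for a
	 * 9000-byte MTU the max frame size is 9022 bytes (see the
	 * computation in igb_sw_init), the advanced Tx descriptor is
	 * 16 bytes and ETH_FCS_LEN is 4, so
	 *	min_tx_space = ALIGN((9022 + 16 - 4) * 2, 1024) >> 10 = 18 KB
	 *	min_rx_space = ALIGN(9022, 1024) >> 10 = 9 KB
	 * and up to (min_tx_space - tx_space) KB is taken from the Rx
	 * allocation, but never enough to drop it below min_rx_space. */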
9d5c8243
AK
1655
1656 /* flow control settings */
1657 /* The high water mark must be low enough to fit one full frame
1658 * (or the size used for early receive) above it in the Rx FIFO.
1659 * Set it to the lower of:
1660 * - 90% of the Rx FIFO size, or
1661 * - the full Rx FIFO size minus two full frames */
1662 hwm = min(((pba << 10) * 9 / 10),
2d064c06 1663 ((pba << 10) - 2 * adapter->max_frame_size));
9d5c8243 1664
d405ea3e
AD
1665 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1666 fc->low_water = fc->high_water - 16;
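	/* Worked example (illustration only, not driver code): with a
	 * 34 KB packet buffer (E1000_PBA_34K) and a 1522-byte max frame
	 * (1500-byte MTU),
	 *	(pba << 10) * 9 / 10                     = 31334
	 *	(pba << 10) - 2 * max_frame_size         = 31772
	 * so hwm = 31334, high_water = 31334 & 0xFFF0 = 31328 and
	 * low_water = 31312. */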
9d5c8243
AK
1667 fc->pause_time = 0xFFFF;
1668 fc->send_xon = 1;
0cce119a 1669 fc->current_mode = fc->requested_mode;
9d5c8243 1670
4ae196df
AD
1671 /* disable receive for all VFs and wait one second */
1672 if (adapter->vfs_allocated_count) {
1673 int i;
1674 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
8fa7e0f7 1675 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
4ae196df
AD
1676
1677 /* ping all the active vfs to let them know we are going down */
f2ca0dbe 1678 igb_ping_all_vfs(adapter);
4ae196df
AD
1679
1680 /* disable transmits and receives */
1681 wr32(E1000_VFRE, 0);
1682 wr32(E1000_VFTE, 0);
1683 }
1684
9d5c8243 1685 /* Allow time for pending master requests to run */
330a6d6a 1686 hw->mac.ops.reset_hw(hw);
9d5c8243
AK
1687 wr32(E1000_WUC, 0);
1688
330a6d6a 1689 if (hw->mac.ops.init_hw(hw))
090b1795 1690 dev_err(&pdev->dev, "Hardware Error\n");
831ec0b4 1691
a27416bb
MV
1692 /*
1693 * Flow control settings reset on hardware reset, so guarantee flow
1694 * control is off when forcing speed.
1695 */
1696 if (!hw->mac.autoneg)
1697 igb_force_mac_fc(hw);
1698
b6e0c419 1699 igb_init_dmac(adapter, pba);
88a268c1
NN
1700 if (!netif_running(adapter->netdev))
1701 igb_power_down_link(adapter);
1702
9d5c8243
AK
1703 igb_update_mng_vlan(adapter);
1704
1705 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1706 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1707
1f6e8178
MV
1708#ifdef CONFIG_IGB_PTP
1709 /* Re-enable PTP, where applicable. */
1710 igb_ptp_reset(adapter);
1711#endif /* CONFIG_IGB_PTP */
1712
330a6d6a 1713 igb_get_phy_info(hw);
9d5c8243
AK
1714}
1715
c8f44aff
MM
1716static netdev_features_t igb_fix_features(struct net_device *netdev,
1717 netdev_features_t features)
b2cb09b1
JP
1718{
1719 /*
1720 * Since there is no support for separate rx/tx vlan accel
1721 * enable/disable, make sure the tx flag is always in the same state as rx.
1722 */
1723 if (features & NETIF_F_HW_VLAN_RX)
1724 features |= NETIF_F_HW_VLAN_TX;
1725 else
1726 features &= ~NETIF_F_HW_VLAN_TX;
1727
1728 return features;
1729}
1730
c8f44aff
MM
1731static int igb_set_features(struct net_device *netdev,
1732 netdev_features_t features)
ac52caa3 1733{
c8f44aff 1734 netdev_features_t changed = netdev->features ^ features;
89eaefb6 1735 struct igb_adapter *adapter = netdev_priv(netdev);
ac52caa3 1736
b2cb09b1
JP
1737 if (changed & NETIF_F_HW_VLAN_RX)
1738 igb_vlan_mode(netdev, features);
1739
89eaefb6
BG
1740 if (!(changed & NETIF_F_RXALL))
1741 return 0;
1742
1743 netdev->features = features;
1744
1745 if (netif_running(netdev))
1746 igb_reinit_locked(adapter);
1747 else
1748 igb_reset(adapter);
1749
ac52caa3
MM
1750 return 0;
1751}
1752
2e5c6922 1753static const struct net_device_ops igb_netdev_ops = {
559e9c49 1754 .ndo_open = igb_open,
2e5c6922 1755 .ndo_stop = igb_close,
cd392f5c 1756 .ndo_start_xmit = igb_xmit_frame,
12dcd86b 1757 .ndo_get_stats64 = igb_get_stats64,
ff41f8dc 1758 .ndo_set_rx_mode = igb_set_rx_mode,
2e5c6922
SH
1759 .ndo_set_mac_address = igb_set_mac,
1760 .ndo_change_mtu = igb_change_mtu,
1761 .ndo_do_ioctl = igb_ioctl,
1762 .ndo_tx_timeout = igb_tx_timeout,
1763 .ndo_validate_addr = eth_validate_addr,
2e5c6922
SH
1764 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1765 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
8151d294
WM
1766 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1767 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1768 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1769 .ndo_get_vf_config = igb_ndo_get_vf_config,
2e5c6922
SH
1770#ifdef CONFIG_NET_POLL_CONTROLLER
1771 .ndo_poll_controller = igb_netpoll,
1772#endif
b2cb09b1
JP
1773 .ndo_fix_features = igb_fix_features,
1774 .ndo_set_features = igb_set_features,
2e5c6922
SH
1775};
1776
d67974f0
CW
1777/**
1778 * igb_set_fw_version - Configure version string for ethtool
1779 * @adapter: adapter struct
1780 *
1781 **/
1782void igb_set_fw_version(struct igb_adapter *adapter)
1783{
1784 struct e1000_hw *hw = &adapter->hw;
1785 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
1786 u16 major, build, patch, fw_version;
1787 u32 etrack_id;
1788
1789 hw->nvm.ops.read(hw, 5, 1, &fw_version);
1790 if (adapter->hw.mac.type != e1000_i211) {
1791 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
1792 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
1793 etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
1794
1795 /* combo image version needs to be found */
1796 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
1797 if ((comb_offset != 0x0) &&
1798 (comb_offset != IGB_NVM_VER_INVALID)) {
1799 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
1800 + 1), 1, &comb_verh);
1801 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
1802 1, &comb_verl);
1803
1804 /* Only display the Option ROM if it exists and is valid */
1805 if ((comb_verh && comb_verl) &&
1806 ((comb_verh != IGB_NVM_VER_INVALID) &&
1807 (comb_verl != IGB_NVM_VER_INVALID))) {
1808 major = comb_verl >> IGB_COMB_VER_SHFT;
1809 build = (comb_verl << IGB_COMB_VER_SHFT) |
1810 (comb_verh >> IGB_COMB_VER_SHFT);
1811 patch = comb_verh & IGB_COMB_VER_MASK;
1812 snprintf(adapter->fw_version,
1813 sizeof(adapter->fw_version),
1814 "%d.%d%d, 0x%08x, %d.%d.%d",
1815 (fw_version & IGB_MAJOR_MASK) >>
1816 IGB_MAJOR_SHIFT,
1817 (fw_version & IGB_MINOR_MASK) >>
1818 IGB_MINOR_SHIFT,
1819 (fw_version & IGB_BUILD_MASK),
1820 etrack_id, major, build, patch);
1821 goto out;
1822 }
1823 }
1824 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1825 "%d.%d%d, 0x%08x",
1826 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1827 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1828 (fw_version & IGB_BUILD_MASK), etrack_id);
1829 } else {
1830 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1831 "%d.%d%d",
1832 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1833 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1834 (fw_version & IGB_BUILD_MASK));
1835 }
1836out:
1837 return;
1838}
1839
9d5c8243
AK
1840/**
1841 * igb_probe - Device Initialization Routine
1842 * @pdev: PCI device information struct
1843 * @ent: entry in igb_pci_tbl
1844 *
1845 * Returns 0 on success, negative on failure
1846 *
1847 * igb_probe initializes an adapter identified by a pci_dev structure.
1848 * The OS initialization, configuring of the adapter private structure,
1849 * and a hardware reset occur.
1850 **/
1851static int __devinit igb_probe(struct pci_dev *pdev,
1852 const struct pci_device_id *ent)
1853{
1854 struct net_device *netdev;
1855 struct igb_adapter *adapter;
1856 struct e1000_hw *hw;
4337e993 1857 u16 eeprom_data = 0;
9835fd73 1858 s32 ret_val;
4337e993 1859 static int global_quad_port_a; /* global quad port a indication */
9d5c8243
AK
1860 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1861 unsigned long mmio_start, mmio_len;
2d6a5e95 1862 int err, pci_using_dac;
9d5c8243 1863 u16 eeprom_apme_mask = IGB_EEPROM_APME;
9835fd73 1864 u8 part_str[E1000_PBANUM_LENGTH];
9d5c8243 1865
bded64a7
AG
1866 /* Catch broken hardware that put the wrong VF device ID in
1867 * the PCIe SR-IOV capability.
1868 */
1869 if (pdev->is_virtfn) {
1870 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
f96a8a0b 1871 pci_name(pdev), pdev->vendor, pdev->device);
bded64a7
AG
1872 return -EINVAL;
1873 }
1874
aed5dec3 1875 err = pci_enable_device_mem(pdev);
9d5c8243
AK
1876 if (err)
1877 return err;
1878
1879 pci_using_dac = 0;
59d71989 1880 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
9d5c8243 1881 if (!err) {
59d71989 1882 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
9d5c8243
AK
1883 if (!err)
1884 pci_using_dac = 1;
1885 } else {
59d71989 1886 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
9d5c8243 1887 if (err) {
59d71989 1888 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
9d5c8243
AK
1889 if (err) {
1890 dev_err(&pdev->dev, "No usable DMA "
1891 "configuration, aborting\n");
1892 goto err_dma;
1893 }
1894 }
1895 }
1896
aed5dec3
AD
1897 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1898 IORESOURCE_MEM),
1899 igb_driver_name);
9d5c8243
AK
1900 if (err)
1901 goto err_pci_reg;
1902
19d5afd4 1903 pci_enable_pcie_error_reporting(pdev);
40a914fa 1904
9d5c8243 1905 pci_set_master(pdev);
c682fc23 1906 pci_save_state(pdev);
9d5c8243
AK
1907
1908 err = -ENOMEM;
1bfaf07b 1909 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1cc3bd87 1910 IGB_MAX_TX_QUEUES);
9d5c8243
AK
1911 if (!netdev)
1912 goto err_alloc_etherdev;
1913
1914 SET_NETDEV_DEV(netdev, &pdev->dev);
1915
1916 pci_set_drvdata(pdev, netdev);
1917 adapter = netdev_priv(netdev);
1918 adapter->netdev = netdev;
1919 adapter->pdev = pdev;
1920 hw = &adapter->hw;
1921 hw->back = adapter;
b3f4d599 1922 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
9d5c8243
AK
1923
1924 mmio_start = pci_resource_start(pdev, 0);
1925 mmio_len = pci_resource_len(pdev, 0);
1926
1927 err = -EIO;
28b0759c
AD
1928 hw->hw_addr = ioremap(mmio_start, mmio_len);
1929 if (!hw->hw_addr)
9d5c8243
AK
1930 goto err_ioremap;
1931
2e5c6922 1932 netdev->netdev_ops = &igb_netdev_ops;
9d5c8243 1933 igb_set_ethtool_ops(netdev);
9d5c8243 1934 netdev->watchdog_timeo = 5 * HZ;
9d5c8243
AK
1935
1936 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1937
1938 netdev->mem_start = mmio_start;
1939 netdev->mem_end = mmio_start + mmio_len;
1940
9d5c8243
AK
1941 /* PCI config space info */
1942 hw->vendor_id = pdev->vendor;
1943 hw->device_id = pdev->device;
1944 hw->revision_id = pdev->revision;
1945 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1946 hw->subsystem_device_id = pdev->subsystem_device;
1947
9d5c8243
AK
1948 /* Copy the default MAC, PHY and NVM function pointers */
1949 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1950 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1951 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1952 /* Initialize skew-specific constants */
1953 err = ei->get_invariants(hw);
1954 if (err)
450c87c8 1955 goto err_sw_init;
9d5c8243 1956
450c87c8 1957 /* setup the private structure */
9d5c8243
AK
1958 err = igb_sw_init(adapter);
1959 if (err)
1960 goto err_sw_init;
1961
1962 igb_get_bus_info_pcie(hw);
1963
1964 hw->phy.autoneg_wait_to_complete = false;
9d5c8243
AK
1965
1966 /* Copper options */
1967 if (hw->phy.media_type == e1000_media_type_copper) {
1968 hw->phy.mdix = AUTO_ALL_MODES;
1969 hw->phy.disable_polarity_correction = false;
1970 hw->phy.ms_type = e1000_ms_hw_default;
1971 }
1972
1973 if (igb_check_reset_block(hw))
1974 dev_info(&pdev->dev,
1975 "PHY reset is blocked due to SOL/IDER session.\n");
1976
077887c3
AD
1977 /*
1978 * features is initialized to 0 at allocation; it might have bits
1979 * set by igb_sw_init, so we should use an OR instead of an
1980 * assignment.
1981 */
1982 netdev->features |= NETIF_F_SG |
1983 NETIF_F_IP_CSUM |
1984 NETIF_F_IPV6_CSUM |
1985 NETIF_F_TSO |
1986 NETIF_F_TSO6 |
1987 NETIF_F_RXHASH |
1988 NETIF_F_RXCSUM |
1989 NETIF_F_HW_VLAN_RX |
1990 NETIF_F_HW_VLAN_TX;
1991
1992 /* copy netdev features into list of user selectable features */
1993 netdev->hw_features |= netdev->features;
89eaefb6 1994 netdev->hw_features |= NETIF_F_RXALL;
077887c3
AD
1995
1996 /* set this bit last since it cannot be part of hw_features */
1997 netdev->features |= NETIF_F_HW_VLAN_FILTER;
1998
1999 netdev->vlan_features |= NETIF_F_TSO |
2000 NETIF_F_TSO6 |
2001 NETIF_F_IP_CSUM |
2002 NETIF_F_IPV6_CSUM |
2003 NETIF_F_SG;
48f29ffc 2004
6b8f0922
BG
2005 netdev->priv_flags |= IFF_SUPP_NOFCS;
2006
7b872a55 2007 if (pci_using_dac) {
9d5c8243 2008 netdev->features |= NETIF_F_HIGHDMA;
7b872a55
YZ
2009 netdev->vlan_features |= NETIF_F_HIGHDMA;
2010 }
9d5c8243 2011
ac52caa3
MM
2012 if (hw->mac.type >= e1000_82576) {
2013 netdev->hw_features |= NETIF_F_SCTP_CSUM;
b9473560 2014 netdev->features |= NETIF_F_SCTP_CSUM;
ac52caa3 2015 }
b9473560 2016
01789349
JP
2017 netdev->priv_flags |= IFF_UNICAST_FLT;
2018
330a6d6a 2019 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
9d5c8243
AK
2020
2021 /* before reading the NVM, reset the controller to put the device in a
2022 * known good starting state */
2023 hw->mac.ops.reset_hw(hw);
2024
f96a8a0b
CW
2025 /*
2026 * make sure the NVM is good; i211 parts have special NVM that
2027 * doesn't contain a checksum
2028 */
2029 if (hw->mac.type != e1000_i211) {
2030 if (hw->nvm.ops.validate(hw) < 0) {
2031 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2032 err = -EIO;
2033 goto err_eeprom;
2034 }
9d5c8243
AK
2035 }
2036
2037 /* copy the MAC address out of the NVM */
2038 if (hw->mac.ops.read_mac_addr(hw))
2039 dev_err(&pdev->dev, "NVM Read Error\n");
2040
2041 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2042 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
2043
2044 if (!is_valid_ether_addr(netdev->perm_addr)) {
2045 dev_err(&pdev->dev, "Invalid MAC Address\n");
2046 err = -EIO;
2047 goto err_eeprom;
2048 }
2049
d67974f0
CW
2050 /* get firmware version for ethtool -i */
2051 igb_set_fw_version(adapter);
2052
c061b18d 2053 setup_timer(&adapter->watchdog_timer, igb_watchdog,
0e340485 2054 (unsigned long) adapter);
c061b18d 2055 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
0e340485 2056 (unsigned long) adapter);
9d5c8243
AK
2057
2058 INIT_WORK(&adapter->reset_task, igb_reset_task);
2059 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2060
450c87c8 2061 /* Initialize link properties that are user-changeable */
9d5c8243
AK
2062 adapter->fc_autoneg = true;
2063 hw->mac.autoneg = true;
2064 hw->phy.autoneg_advertised = 0x2f;
2065
0cce119a
AD
2066 hw->fc.requested_mode = e1000_fc_default;
2067 hw->fc.current_mode = e1000_fc_default;
9d5c8243 2068
9d5c8243
AK
2069 igb_validate_mdi_setting(hw);
2070
9d5c8243
AK
2071 /* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
2072 * enable the ACPI Magic Packet filter
2073 */
2074
a2cf8b6c 2075 if (hw->bus.func == 0)
312c75ae 2076 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
6d337dce 2077 else if (hw->mac.type >= e1000_82580)
55cac248
AD
2078 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2079 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2080 &eeprom_data);
a2cf8b6c
AD
2081 else if (hw->bus.func == 1)
2082 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
9d5c8243
AK
2083
2084 if (eeprom_data & eeprom_apme_mask)
2085 adapter->eeprom_wol |= E1000_WUFC_MAG;
2086
2087 /* now that we have the eeprom settings, apply the special cases where
2088 * the eeprom may be wrong or the board simply won't support wake on
2089 * lan on a particular port */
2090 switch (pdev->device) {
2091 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2092 adapter->eeprom_wol = 0;
2093 break;
2094 case E1000_DEV_ID_82575EB_FIBER_SERDES:
2d064c06
AD
2095 case E1000_DEV_ID_82576_FIBER:
2096 case E1000_DEV_ID_82576_SERDES:
9d5c8243
AK
2097 /* Wake events only supported on port A for dual fiber
2098 * regardless of eeprom setting */
2099 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2100 adapter->eeprom_wol = 0;
2101 break;
c8ea5ea9 2102 case E1000_DEV_ID_82576_QUAD_COPPER:
d5aa2252 2103 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
c8ea5ea9
AD
2104 /* if quad port adapter, disable WoL on all but port A */
2105 if (global_quad_port_a != 0)
2106 adapter->eeprom_wol = 0;
2107 else
2108 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2109 /* Reset for multiple quad port adapters */
2110 if (++global_quad_port_a == 4)
2111 global_quad_port_a = 0;
2112 break;
9d5c8243
AK
2113 }
2114
2115 /* initialize the wol settings based on the eeprom settings */
2116 adapter->wol = adapter->eeprom_wol;
e1b86d84 2117 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
9d5c8243
AK
2118
2119 /* reset the hardware with the new settings */
2120 igb_reset(adapter);
2121
2122 /* let the f/w know that the h/w is now under the control of the
2123 * driver. */
2124 igb_get_hw_control(adapter);
2125
9d5c8243
AK
2126 strcpy(netdev->name, "eth%d");
2127 err = register_netdev(netdev);
2128 if (err)
2129 goto err_register;
2130
b168dfc5
JB
2131 /* carrier off reporting is important to ethtool even BEFORE open */
2132 netif_carrier_off(netdev);
2133
421e02f0 2134#ifdef CONFIG_IGB_DCA
bbd98fe4 2135 if (dca_add_requester(&pdev->dev) == 0) {
7dfc16fa 2136 adapter->flags |= IGB_FLAG_DCA_ENABLED;
fe4506b6 2137 dev_info(&pdev->dev, "DCA enabled\n");
fe4506b6
JC
2138 igb_setup_dca(adapter);
2139 }
fe4506b6 2140
38c845c7 2141#endif
3c89f6d0 2142
7ebae817 2143#ifdef CONFIG_IGB_PTP
673b8b70 2144 /* do hw tstamp init after resetting */
7ebae817 2145 igb_ptp_init(adapter);
3c89f6d0 2146#endif /* CONFIG_IGB_PTP */
673b8b70 2147
9d5c8243
AK
2148 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2149 /* print bus type/speed/width info */
7c510e4b 2150 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
9d5c8243 2151 netdev->name,
559e9c49 2152 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
ff846f52 2153 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
559e9c49 2154 "unknown"),
59c3de89
AD
2155 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2156 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2157 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2158 "unknown"),
7c510e4b 2159 netdev->dev_addr);
9d5c8243 2160
9835fd73
CW
2161 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2162 if (ret_val)
2163 strcpy(part_str, "Unknown");
2164 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
9d5c8243
AK
2165 dev_info(&pdev->dev,
2166 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2167 adapter->msix_entries ? "MSI-X" :
7dfc16fa 2168 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
9d5c8243 2169 adapter->num_rx_queues, adapter->num_tx_queues);
09b068d4
CW
2170 switch (hw->mac.type) {
2171 case e1000_i350:
f96a8a0b
CW
2172 case e1000_i210:
2173 case e1000_i211:
09b068d4
CW
2174 igb_set_eee_i350(hw);
2175 break;
2176 default:
2177 break;
2178 }
749ab2cd
YZ
2179
2180 pm_runtime_put_noidle(&pdev->dev);
9d5c8243
AK
2181 return 0;
2182
2183err_register:
2184 igb_release_hw_control(adapter);
2185err_eeprom:
2186 if (!igb_check_reset_block(hw))
f5f4cf08 2187 igb_reset_phy(hw);
9d5c8243
AK
2188
2189 if (hw->flash_address)
2190 iounmap(hw->flash_address);
9d5c8243 2191err_sw_init:
047e0030 2192 igb_clear_interrupt_scheme(adapter);
9d5c8243
AK
2193 iounmap(hw->hw_addr);
2194err_ioremap:
2195 free_netdev(netdev);
2196err_alloc_etherdev:
559e9c49
AD
2197 pci_release_selected_regions(pdev,
2198 pci_select_bars(pdev, IORESOURCE_MEM));
9d5c8243
AK
2199err_pci_reg:
2200err_dma:
2201 pci_disable_device(pdev);
2202 return err;
2203}
2204
2205/**
2206 * igb_remove - Device Removal Routine
2207 * @pdev: PCI device information struct
2208 *
2209 * igb_remove is called by the PCI subsystem to alert the driver
2210 * that it should release a PCI device. This could be caused by a
2211 * Hot-Plug event, or because the driver is going to be removed from
2212 * memory.
2213 **/
2214static void __devexit igb_remove(struct pci_dev *pdev)
2215{
2216 struct net_device *netdev = pci_get_drvdata(pdev);
2217 struct igb_adapter *adapter = netdev_priv(netdev);
fe4506b6 2218 struct e1000_hw *hw = &adapter->hw;
9d5c8243 2219
749ab2cd 2220 pm_runtime_get_noresume(&pdev->dev);
7ebae817 2221#ifdef CONFIG_IGB_PTP
a79f4f88 2222 igb_ptp_stop(adapter);
3c89f6d0 2223#endif /* CONFIG_IGB_PTP */
749ab2cd 2224
760141a5
TH
2225 /*
2226 * The watchdog timer may be rescheduled, so explicitly
2227 * disable it from being rescheduled.
2228 */
9d5c8243
AK
2229 set_bit(__IGB_DOWN, &adapter->state);
2230 del_timer_sync(&adapter->watchdog_timer);
2231 del_timer_sync(&adapter->phy_info_timer);
2232
760141a5
TH
2233 cancel_work_sync(&adapter->reset_task);
2234 cancel_work_sync(&adapter->watchdog_task);
9d5c8243 2235
421e02f0 2236#ifdef CONFIG_IGB_DCA
7dfc16fa 2237 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
fe4506b6
JC
2238 dev_info(&pdev->dev, "DCA disabled\n");
2239 dca_remove_requester(&pdev->dev);
7dfc16fa 2240 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
cbd347ad 2241 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
fe4506b6
JC
2242 }
2243#endif
2244
9d5c8243
AK
2245 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2246 * would have already happened in close and is redundant. */
2247 igb_release_hw_control(adapter);
2248
2249 unregister_netdev(netdev);
2250
047e0030 2251 igb_clear_interrupt_scheme(adapter);
9d5c8243 2252
37680117
AD
2253#ifdef CONFIG_PCI_IOV
2254 /* reclaim resources allocated to VFs */
2255 if (adapter->vf_data) {
2256 /* disable iov and allow time for transactions to clear */
f557147c
SA
2257 if (igb_vfs_are_assigned(adapter)) {
2258 dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2259 } else {
0224d663
GR
2260 pci_disable_sriov(pdev);
2261 msleep(500);
0224d663 2262 }
37680117
AD
2263
2264 kfree(adapter->vf_data);
2265 adapter->vf_data = NULL;
2266 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
945a5151 2267 wrfl();
37680117
AD
2268 msleep(100);
2269 dev_info(&pdev->dev, "IOV Disabled\n");
2270 }
2271#endif
559e9c49 2272
28b0759c
AD
2273 iounmap(hw->hw_addr);
2274 if (hw->flash_address)
2275 iounmap(hw->flash_address);
559e9c49
AD
2276 pci_release_selected_regions(pdev,
2277 pci_select_bars(pdev, IORESOURCE_MEM));
9d5c8243 2278
1128c756 2279 kfree(adapter->shadow_vfta);
9d5c8243
AK
2280 free_netdev(netdev);
2281
19d5afd4 2282 pci_disable_pcie_error_reporting(pdev);
40a914fa 2283
9d5c8243
AK
2284 pci_disable_device(pdev);
2285}
2286
a6b623e0
AD
2287/**
2288 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2289 * @adapter: board private structure to initialize
2290 *
2291 * This function initializes the vf specific data storage and then attempts to
2292 * allocate the VFs. The reason for ordering it this way is because it is much
2293 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2294 * the memory for the VFs.
2295 **/
2296static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2297{
2298#ifdef CONFIG_PCI_IOV
2299 struct pci_dev *pdev = adapter->pdev;
f96a8a0b 2300 struct e1000_hw *hw = &adapter->hw;
f557147c 2301 int old_vfs = pci_num_vf(adapter->pdev);
0224d663 2302 int i;
a6b623e0 2303
f96a8a0b
CW
2304 /* Virtualization features not supported on i210 family. */
2305 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2306 return;
2307
0224d663
GR
2308 if (old_vfs) {
2309 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2310 "max_vfs setting of %d\n", old_vfs, max_vfs);
2311 adapter->vfs_allocated_count = old_vfs;
a6b623e0
AD
2312 }
2313
0224d663
GR
2314 if (!adapter->vfs_allocated_count)
2315 return;
2316
2317 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2318 sizeof(struct vf_data_storage), GFP_KERNEL);
f96a8a0b 2319
0224d663
GR
2320 /* if allocation failed then we do not support SR-IOV */
2321 if (!adapter->vf_data) {
a6b623e0 2322 adapter->vfs_allocated_count = 0;
0224d663
GR
2323 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2324 "Data Storage\n");
2325 goto out;
a6b623e0 2326 }
0224d663
GR
2327
2328 if (!old_vfs) {
2329 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2330 goto err_out;
2331 }
2332 dev_info(&pdev->dev, "%d VFs allocated\n",
2333 adapter->vfs_allocated_count);
2334 for (i = 0; i < adapter->vfs_allocated_count; i++)
2335 igb_vf_configure(adapter, i);
2336
2337 /* DMA Coalescing is not supported in IOV mode. */
2338 adapter->flags &= ~IGB_FLAG_DMAC;
2339 goto out;
2340err_out:
2341 kfree(adapter->vf_data);
2342 adapter->vf_data = NULL;
2343 adapter->vfs_allocated_count = 0;
2344out:
2345 return;
a6b623e0
AD
2346#endif /* CONFIG_PCI_IOV */
2347}
2348
9d5c8243
AK
2349/**
2350 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2351 * @adapter: board private structure to initialize
2352 *
2353 * igb_sw_init initializes the Adapter private data structure.
2354 * Fields are initialized based on PCI device information and
2355 * OS network device settings (MTU size).
2356 **/
2357static int __devinit igb_sw_init(struct igb_adapter *adapter)
2358{
2359 struct e1000_hw *hw = &adapter->hw;
2360 struct net_device *netdev = adapter->netdev;
2361 struct pci_dev *pdev = adapter->pdev;
374a542d 2362 u32 max_rss_queues;
9d5c8243
AK
2363
2364 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2365
13fde97a 2366 /* set default ring sizes */
68fd9910
AD
2367 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2368 adapter->rx_ring_count = IGB_DEFAULT_RXD;
13fde97a
AD
2369
2370 /* set default ITR values */
4fc82adf
AD
2371 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2372 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2373
13fde97a
AD
2374 /* set default work limits */
2375 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2376
153285f9
AD
2377 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2378 VLAN_HLEN;
9d5c8243
AK
2379 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
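	/* For reference: with the default 1500-byte MTU this works out to
	 * max_frame_size = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
	 * 4 (VLAN_HLEN) = 1522 bytes, and min_frame_size = 60 (ETH_ZLEN) +
	 * 4 = 64 bytes. */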
2380
12dcd86b 2381 spin_lock_init(&adapter->stats64_lock);
a6b623e0 2382#ifdef CONFIG_PCI_IOV
6b78bb1d
CW
2383 switch (hw->mac.type) {
2384 case e1000_82576:
2385 case e1000_i350:
9b082d73
SA
2386 if (max_vfs > 7) {
2387 dev_warn(&pdev->dev,
2388 "Maximum of 7 VFs per PF, using max\n");
2389 adapter->vfs_allocated_count = 7;
2390 } else
2391 adapter->vfs_allocated_count = max_vfs;
6b78bb1d
CW
2392 break;
2393 default:
2394 break;
2395 }
a6b623e0 2396#endif /* CONFIG_PCI_IOV */
374a542d
MV
2397
2398 /* Determine the maximum number of RSS queues supported. */
f96a8a0b 2399 switch (hw->mac.type) {
374a542d
MV
2400 case e1000_i211:
2401 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
2402 break;
2403 case e1000_82575:
f96a8a0b 2404 case e1000_i210:
374a542d
MV
2405 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2406 break;
2407 case e1000_i350:
2408 /* I350 cannot do RSS and SR-IOV at the same time */
2409 if (!!adapter->vfs_allocated_count) {
2410 max_rss_queues = 1;
2411 break;
2412 }
2413 /* fall through */
2414 case e1000_82576:
2415 if (!!adapter->vfs_allocated_count) {
2416 max_rss_queues = 2;
2417 break;
2418 }
2419 /* fall through */
2420 case e1000_82580:
2421 default:
2422 max_rss_queues = IGB_MAX_RX_QUEUES;
f96a8a0b 2423 break;
374a542d
MV
2424 }
2425
2426 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2427
2428 /* Determine if we need to pair queues. */
2429 switch (hw->mac.type) {
2430 case e1000_82575:
f96a8a0b 2431 case e1000_i211:
374a542d 2432 /* Device supports enough interrupts without queue pairing. */
f96a8a0b 2433 break;
374a542d
MV
2434 case e1000_82576:
2435 /*
2436 * If VFs are going to be allocated with RSS queues then we
2437 * should pair the queues in order to conserve interrupts due
2438 * to limited supply.
2439 */
2440 if ((adapter->rss_queues > 1) &&
2441 (adapter->vfs_allocated_count > 6))
2442 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2443 /* fall through */
2444 case e1000_82580:
2445 case e1000_i350:
2446 case e1000_i210:
f96a8a0b 2447 default:
374a542d
MV
2448 /*
2449 * If rss_queues > half of max_rss_queues, pair the queues in
2450 * order to conserve interrupts due to limited supply.
2451 */
2452 if (adapter->rss_queues > (max_rss_queues / 2))
2453 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
f96a8a0b
CW
2454 break;
2455 }
a99955fc 2456
1128c756
CW
2457 /* Setup and initialize a copy of the hw vlan table array */
2458 adapter->shadow_vfta = kzalloc(sizeof(u32) *
2459 E1000_VLAN_FILTER_TBL_SIZE,
2460 GFP_ATOMIC);
2461
a6b623e0 2462 /* This call may decrease the number of queues */
047e0030 2463 if (igb_init_interrupt_scheme(adapter)) {
9d5c8243
AK
2464 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2465 return -ENOMEM;
2466 }
2467
a6b623e0
AD
2468 igb_probe_vfs(adapter);
2469
9d5c8243
AK
2470 /* Explicitly disable IRQ since the NIC can be in any state. */
2471 igb_irq_disable(adapter);
2472
f96a8a0b 2473 if (hw->mac.type >= e1000_i350)
831ec0b4
CW
2474 adapter->flags &= ~IGB_FLAG_DMAC;
2475
9d5c8243
AK
2476 set_bit(__IGB_DOWN, &adapter->state);
2477 return 0;
2478}
2479
2480/**
2481 * igb_open - Called when a network interface is made active
2482 * @netdev: network interface device structure
2483 *
2484 * Returns 0 on success, negative value on failure
2485 *
2486 * The open entry point is called when a network interface is made
2487 * active by the system (IFF_UP). At this point all resources needed
2488 * for transmit and receive operations are allocated, the interrupt
2489 * handler is registered with the OS, the watchdog timer is started,
2490 * and the stack is notified that the interface is ready.
2491 **/
749ab2cd 2492static int __igb_open(struct net_device *netdev, bool resuming)
9d5c8243
AK
2493{
2494 struct igb_adapter *adapter = netdev_priv(netdev);
2495 struct e1000_hw *hw = &adapter->hw;
749ab2cd 2496 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2497 int err;
2498 int i;
2499
2500 /* disallow open during test */
749ab2cd
YZ
2501 if (test_bit(__IGB_TESTING, &adapter->state)) {
2502 WARN_ON(resuming);
9d5c8243 2503 return -EBUSY;
749ab2cd
YZ
2504 }
2505
2506 if (!resuming)
2507 pm_runtime_get_sync(&pdev->dev);
9d5c8243 2508
b168dfc5
JB
2509 netif_carrier_off(netdev);
2510
9d5c8243
AK
2511 /* allocate transmit descriptors */
2512 err = igb_setup_all_tx_resources(adapter);
2513 if (err)
2514 goto err_setup_tx;
2515
2516 /* allocate receive descriptors */
2517 err = igb_setup_all_rx_resources(adapter);
2518 if (err)
2519 goto err_setup_rx;
2520
88a268c1 2521 igb_power_up_link(adapter);
9d5c8243 2522
9d5c8243
AK
2523 /* before we allocate an interrupt, we must be ready to handle it.
2524 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2525 * as soon as we call pci_request_irq, so we have to setup our
2526 * clean_rx handler before we do so. */
2527 igb_configure(adapter);
2528
2529 err = igb_request_irq(adapter);
2530 if (err)
2531 goto err_req_irq;
2532
2533 /* From here on the code is the same as igb_up() */
2534 clear_bit(__IGB_DOWN, &adapter->state);
2535
0d1ae7f4
AD
2536 for (i = 0; i < adapter->num_q_vectors; i++)
2537 napi_enable(&(adapter->q_vector[i]->napi));
9d5c8243
AK
2538
2539 /* Clear any pending interrupts. */
2540 rd32(E1000_ICR);
844290e5
PW
2541
2542 igb_irq_enable(adapter);
2543
d4960307
AD
2544 /* notify VFs that reset has been completed */
2545 if (adapter->vfs_allocated_count) {
2546 u32 reg_data = rd32(E1000_CTRL_EXT);
2547 reg_data |= E1000_CTRL_EXT_PFRSTD;
2548 wr32(E1000_CTRL_EXT, reg_data);
2549 }
2550
d55b53ff
JK
2551 netif_tx_start_all_queues(netdev);
2552
749ab2cd
YZ
2553 if (!resuming)
2554 pm_runtime_put(&pdev->dev);
2555
25568a53
AD
2556 /* start the watchdog. */
2557 hw->mac.get_link_status = 1;
2558 schedule_work(&adapter->watchdog_task);
9d5c8243
AK
2559
2560 return 0;
2561
2562err_req_irq:
2563 igb_release_hw_control(adapter);
88a268c1 2564 igb_power_down_link(adapter);
9d5c8243
AK
2565 igb_free_all_rx_resources(adapter);
2566err_setup_rx:
2567 igb_free_all_tx_resources(adapter);
2568err_setup_tx:
2569 igb_reset(adapter);
749ab2cd
YZ
2570 if (!resuming)
2571 pm_runtime_put(&pdev->dev);
9d5c8243
AK
2572
2573 return err;
2574}
2575
749ab2cd
YZ
2576static int igb_open(struct net_device *netdev)
2577{
2578 return __igb_open(netdev, false);
2579}
2580
9d5c8243
AK
2581/**
2582 * igb_close - Disables a network interface
2583 * @netdev: network interface device structure
2584 *
2585 * Returns 0, this is not allowed to fail
2586 *
2587 * The close entry point is called when an interface is de-activated
2588 * by the OS. The hardware is still under the driver's control, but
2589 * needs to be disabled. A global MAC reset is issued to stop the
2590 * hardware, and all transmit and receive resources are freed.
2591 **/
749ab2cd 2592static int __igb_close(struct net_device *netdev, bool suspending)
9d5c8243
AK
2593{
2594 struct igb_adapter *adapter = netdev_priv(netdev);
749ab2cd 2595 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2596
2597 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
9d5c8243 2598
749ab2cd
YZ
2599 if (!suspending)
2600 pm_runtime_get_sync(&pdev->dev);
2601
2602 igb_down(adapter);
9d5c8243
AK
2603 igb_free_irq(adapter);
2604
2605 igb_free_all_tx_resources(adapter);
2606 igb_free_all_rx_resources(adapter);
2607
749ab2cd
YZ
2608 if (!suspending)
2609 pm_runtime_put_sync(&pdev->dev);
9d5c8243
AK
2610 return 0;
2611}
2612
749ab2cd
YZ
2613static int igb_close(struct net_device *netdev)
2614{
2615 return __igb_close(netdev, false);
2616}
2617
9d5c8243
AK
2618/**
2619 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
9d5c8243
AK
2620 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2621 *
2622 * Return 0 on success, negative on failure
2623 **/
80785298 2624int igb_setup_tx_resources(struct igb_ring *tx_ring)
9d5c8243 2625{
59d71989 2626 struct device *dev = tx_ring->dev;
9d5c8243
AK
2627 int size;
2628
06034649 2629 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
f33005a6
AD
2630
2631 tx_ring->tx_buffer_info = vzalloc(size);
06034649 2632 if (!tx_ring->tx_buffer_info)
9d5c8243 2633 goto err;
9d5c8243
AK
2634
2635 /* round up to nearest 4K */
85e8d004 2636 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
9d5c8243
AK
2637 tx_ring->size = ALIGN(tx_ring->size, 4096);
2638
59d71989
AD
2639 tx_ring->desc = dma_alloc_coherent(dev,
2640 tx_ring->size,
2641 &tx_ring->dma,
2642 GFP_KERNEL);
9d5c8243
AK
2643 if (!tx_ring->desc)
2644 goto err;
2645
9d5c8243
AK
2646 tx_ring->next_to_use = 0;
2647 tx_ring->next_to_clean = 0;
81c2fc22 2648
9d5c8243
AK
2649 return 0;
2650
2651err:
06034649 2652 vfree(tx_ring->tx_buffer_info);
f33005a6
AD
2653 tx_ring->tx_buffer_info = NULL;
2654 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
9d5c8243
AK
2655 return -ENOMEM;
2656}
2657
2658/**
2659 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2660 * (Descriptors) for all queues
2661 * @adapter: board private structure
2662 *
2663 * Return 0 on success, negative on failure
2664 **/
2665static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2666{
439705e1 2667 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2668 int i, err = 0;
2669
2670 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 2671 err = igb_setup_tx_resources(adapter->tx_ring[i]);
9d5c8243 2672 if (err) {
439705e1 2673 dev_err(&pdev->dev,
9d5c8243
AK
2674 "Allocation for Tx Queue %u failed\n", i);
2675 for (i--; i >= 0; i--)
3025a446 2676 igb_free_tx_resources(adapter->tx_ring[i]);
9d5c8243
AK
2677 break;
2678 }
2679 }
2680
2681 return err;
2682}
2683
2684/**
85b430b4
AD
2685 * igb_setup_tctl - configure the transmit control registers
2686 * @adapter: Board private structure
9d5c8243 2687 **/
d7ee5b3a 2688void igb_setup_tctl(struct igb_adapter *adapter)
9d5c8243 2689{
9d5c8243
AK
2690 struct e1000_hw *hw = &adapter->hw;
2691 u32 tctl;
9d5c8243 2692
85b430b4
AD
2693 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2694 wr32(E1000_TXDCTL(0), 0);
9d5c8243
AK
2695
2696 /* Program the Transmit Control Register */
9d5c8243
AK
2697 tctl = rd32(E1000_TCTL);
2698 tctl &= ~E1000_TCTL_CT;
2699 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2700 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2701
2702 igb_config_collision_dist(hw);
2703
9d5c8243
AK
2704 /* Enable transmits */
2705 tctl |= E1000_TCTL_EN;
2706
2707 wr32(E1000_TCTL, tctl);
2708}
2709
85b430b4
AD
2710/**
2711 * igb_configure_tx_ring - Configure transmit ring after Reset
2712 * @adapter: board private structure
2713 * @ring: tx ring to configure
2714 *
2715 * Configure a transmit ring after a reset.
2716 **/
d7ee5b3a
AD
2717void igb_configure_tx_ring(struct igb_adapter *adapter,
2718 struct igb_ring *ring)
85b430b4
AD
2719{
2720 struct e1000_hw *hw = &adapter->hw;
a74420e0 2721 u32 txdctl = 0;
85b430b4
AD
2722 u64 tdba = ring->dma;
2723 int reg_idx = ring->reg_idx;
2724
2725 /* disable the queue */
a74420e0 2726 wr32(E1000_TXDCTL(reg_idx), 0);
85b430b4
AD
2727 wrfl();
2728 mdelay(10);
2729
2730 wr32(E1000_TDLEN(reg_idx),
2731 ring->count * sizeof(union e1000_adv_tx_desc));
2732 wr32(E1000_TDBAL(reg_idx),
2733 tdba & 0x00000000ffffffffULL);
2734 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2735
fce99e34 2736 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
a74420e0 2737 wr32(E1000_TDH(reg_idx), 0);
fce99e34 2738 writel(0, ring->tail);
85b430b4
AD
2739
2740 txdctl |= IGB_TX_PTHRESH;
2741 txdctl |= IGB_TX_HTHRESH << 8;
2742 txdctl |= IGB_TX_WTHRESH << 16;
2743
2744 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2745 wr32(E1000_TXDCTL(reg_idx), txdctl);
2746}
2747
2748/**
2749 * igb_configure_tx - Configure transmit Unit after Reset
2750 * @adapter: board private structure
2751 *
2752 * Configure the Tx unit of the MAC after a reset.
2753 **/
2754static void igb_configure_tx(struct igb_adapter *adapter)
2755{
2756 int i;
2757
2758 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 2759 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
85b430b4
AD
2760}
2761
9d5c8243
AK
2762/**
2763 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
9d5c8243
AK
2764 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2765 *
2766 * Returns 0 on success, negative on failure
2767 **/
80785298 2768int igb_setup_rx_resources(struct igb_ring *rx_ring)
9d5c8243 2769{
59d71989 2770 struct device *dev = rx_ring->dev;
f33005a6 2771 int size;
9d5c8243 2772
06034649 2773 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
f33005a6
AD
2774
2775 rx_ring->rx_buffer_info = vzalloc(size);
06034649 2776 if (!rx_ring->rx_buffer_info)
9d5c8243 2777 goto err;
9d5c8243 2778
9d5c8243
AK
2779
2780 /* Round up to nearest 4K */
f33005a6 2781 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
9d5c8243
AK
2782 rx_ring->size = ALIGN(rx_ring->size, 4096);
2783
59d71989
AD
2784 rx_ring->desc = dma_alloc_coherent(dev,
2785 rx_ring->size,
2786 &rx_ring->dma,
2787 GFP_KERNEL);
9d5c8243
AK
2788 if (!rx_ring->desc)
2789 goto err;
2790
2791 rx_ring->next_to_clean = 0;
2792 rx_ring->next_to_use = 0;
9d5c8243 2793
9d5c8243
AK
2794 return 0;
2795
2796err:
06034649
AD
2797 vfree(rx_ring->rx_buffer_info);
2798 rx_ring->rx_buffer_info = NULL;
f33005a6 2799 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
9d5c8243
AK
2800 return -ENOMEM;
2801}
2802
2803/**
2804 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2805 * (Descriptors) for all queues
2806 * @adapter: board private structure
2807 *
2808 * Return 0 on success, negative on failure
2809 **/
2810static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2811{
439705e1 2812 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2813 int i, err = 0;
2814
2815 for (i = 0; i < adapter->num_rx_queues; i++) {
3025a446 2816 err = igb_setup_rx_resources(adapter->rx_ring[i]);
9d5c8243 2817 if (err) {
439705e1 2818 dev_err(&pdev->dev,
9d5c8243
AK
2819 "Allocation for Rx Queue %u failed\n", i);
2820 for (i--; i >= 0; i--)
3025a446 2821 igb_free_rx_resources(adapter->rx_ring[i]);
9d5c8243
AK
2822 break;
2823 }
2824 }
2825
2826 return err;
2827}
2828
06cf2666
AD
2829/**
2830 * igb_setup_mrqc - configure the multiple receive queue control registers
2831 * @adapter: Board private structure
2832 **/
2833static void igb_setup_mrqc(struct igb_adapter *adapter)
2834{
2835 struct e1000_hw *hw = &adapter->hw;
2836 u32 mrqc, rxcsum;
797fd4be 2837 u32 j, num_rx_queues, shift = 0;
06cf2666
AD
2838 static const u8 rsshash[40] = {
2839 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2840 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2841 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2842 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2843
2844 /* Fill out hash function seeds */
2845 for (j = 0; j < 10; j++) {
2846 u32 rsskey = rsshash[(j * 4)];
2847 rsskey |= rsshash[(j * 4) + 1] << 8;
2848 rsskey |= rsshash[(j * 4) + 2] << 16;
2849 rsskey |= rsshash[(j * 4) + 3] << 24;
2850 array_wr32(E1000_RSSRK(0), j, rsskey);
2851 }
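	/* Note: each 32-bit RSSRK register takes four consecutive key bytes,
	 * least-significant byte first, so with the key above
	 * RSSRK(0) = 0xda565a6d, RSSRK(1) = 0xc20e5b25, and so on. */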
2852
a99955fc 2853 num_rx_queues = adapter->rss_queues;
06cf2666 2854
797fd4be
AD
2855 switch (hw->mac.type) {
2856 case e1000_82575:
2857 shift = 6;
2858 break;
2859 case e1000_82576:
2860 /* 82576 supports 2 RSS queues for SR-IOV */
2861 if (adapter->vfs_allocated_count) {
06cf2666
AD
2862 shift = 3;
2863 num_rx_queues = 2;
06cf2666 2864 }
797fd4be
AD
2865 break;
2866 default:
2867 break;
06cf2666
AD
2868 }
2869
797fd4be
AD
2870 /*
2871 * Populate the indirection table 4 entries at a time. To do this
2872 * we are generating the results for n and n+2 and then interleaving
2873 * those with the results for n+1 and n+3.
2874 */
2875 for (j = 0; j < 32; j++) {
2876 /* first pass generates n and n+2 */
2877 u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
2878 u32 reta = (base & 0x07800780) >> (7 - shift);
2879
2880 /* second pass generates n+1 and n+3 */
2881 base += 0x00010001 * num_rx_queues;
2882 reta |= (base & 0x07800780) << (1 + shift);
2883
2884 wr32(E1000_RETA(j), reta);
06cf2666
AD
2885 }
2886
2887 /*
2888 * Disable raw packet checksumming so that RSS hash is placed in
2889 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2890 * offloads as they are enabled by default
2891 */
2892 rxcsum = rd32(E1000_RXCSUM);
2893 rxcsum |= E1000_RXCSUM_PCSD;
2894
2895 if (adapter->hw.mac.type >= e1000_82576)
2896 /* Enable Receive Checksum Offload for SCTP */
2897 rxcsum |= E1000_RXCSUM_CRCOFL;
2898
2899 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2900 wr32(E1000_RXCSUM, rxcsum);
f96a8a0b
CW
2901 /*
2902 * Generate RSS hash based on TCP port numbers and/or
2903 * IPv4/v6 src and dst addresses since UDP cannot be
2904 * hashed reliably due to IP fragmentation
2905 */
2906
2907 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
2908 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2909 E1000_MRQC_RSS_FIELD_IPV6 |
2910 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2911 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
06cf2666
AD
2912
2913 /* If VMDq is enabled then we set the appropriate mode for that, else
2914 * we default to RSS so that an RSS hash is calculated per packet even
2915 * if we are only using one queue */
2916 if (adapter->vfs_allocated_count) {
2917 if (hw->mac.type > e1000_82575) {
2918 /* Set the default pool for the PF's first queue */
2919 u32 vtctl = rd32(E1000_VT_CTL);
2920 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2921 E1000_VT_CTL_DISABLE_DEF_POOL);
2922 vtctl |= adapter->vfs_allocated_count <<
2923 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2924 wr32(E1000_VT_CTL, vtctl);
2925 }
a99955fc 2926 if (adapter->rss_queues > 1)
f96a8a0b 2927 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
06cf2666 2928 else
f96a8a0b 2929 mrqc |= E1000_MRQC_ENABLE_VMDQ;
06cf2666 2930 } else {
f96a8a0b
CW
2931 if (hw->mac.type != e1000_i211)
2932 mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
06cf2666
AD
2933 }
2934 igb_vmm_control(adapter);
2935
06cf2666
AD
2936 wr32(E1000_MRQC, mrqc);
2937}
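
/*
 * Illustrative sketch (not part of the driver): the bit-twiddling loop in
 * igb_setup_mrqc() above packs four 8-bit indirection-table entries into
 * each of the 32 RETA registers.  Written one entry at a time it is
 * equivalent to the hypothetical helper below, where table index i (0..127)
 * is spread evenly across num_rx_queues and the queue number is shifted
 * into the position the MAC expects (shift is 6 on 82575, 3 on 82576 with
 * VFs, 0 otherwise, as selected above).
 */
static void igb_example_fill_reta(struct e1000_hw *hw, u32 num_rx_queues,
				  u32 shift)
{
	u32 reta = 0;
	u32 i;

	for (i = 0; i < 128; i++) {
		/* even distribution: entry i maps to queue (i * n) / 128 */
		u32 entry = ((i * num_rx_queues) >> 7) << shift;

		/* four 8-bit entries per 32-bit RETA register */
		reta |= entry << ((i & 3) * 8);
		if ((i & 3) == 3) {
			wr32(E1000_RETA(i >> 2), reta);
			reta = 0;
		}
	}
}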
2938
9d5c8243
AK
2939/**
2940 * igb_setup_rctl - configure the receive control registers
2941 * @adapter: Board private structure
2942 **/
d7ee5b3a 2943void igb_setup_rctl(struct igb_adapter *adapter)
9d5c8243
AK
2944{
2945 struct e1000_hw *hw = &adapter->hw;
2946 u32 rctl;
9d5c8243
AK
2947
2948 rctl = rd32(E1000_RCTL);
2949
2950 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
69d728ba 2951 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
9d5c8243 2952
69d728ba 2953 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
28b0759c 2954 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
9d5c8243 2955
87cb7e8c
AK
2956 /*
2957 * enable stripping of CRC. It's unlikely this will break BMC
2958 * redirection as it did with e1000. Newer features require
2959 * that the HW strips the CRC.
73cd78f1 2960 */
87cb7e8c 2961 rctl |= E1000_RCTL_SECRC;
9d5c8243 2962
559e9c49 2963 /* disable store bad packets and clear size bits. */
ec54d7d6 2964 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
9d5c8243 2965
6ec43fe6
AD
2966 /* enable LPE to prevent packets larger than max_frame_size */
2967 rctl |= E1000_RCTL_LPE;
9d5c8243 2968
952f72a8
AD
2969 /* disable queue 0 to prevent tail write w/o re-config */
2970 wr32(E1000_RXDCTL(0), 0);
9d5c8243 2971
e1739522
AD
2972 /* Attention!!! For SR-IOV PF driver operations you must enable
2973 * queue drop for all VF and PF queues to prevent head of line blocking
2974 * if an un-trusted VF does not provide descriptors to hardware.
2975 */
2976 if (adapter->vfs_allocated_count) {
e1739522
AD
2977 /* set all queue drop enable bits */
2978 wr32(E1000_QDE, ALL_QUEUES);
e1739522
AD
2979 }
2980
89eaefb6
BG
2981 /* This is useful for sniffing bad packets. */
2982 if (adapter->netdev->features & NETIF_F_RXALL) {
2983 /* UPE and MPE will be handled by normal PROMISC logic
2984 * in igb_set_rx_mode */
2985 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
2986 E1000_RCTL_BAM | /* RX All Bcast Pkts */
2987 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
2988
2989 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
2990 E1000_RCTL_DPF | /* Allow filtered pause */
2991 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
2992 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
2993 * and that breaks VLANs.
2994 */
2995 }
2996
9d5c8243
AK
2997 wr32(E1000_RCTL, rctl);
2998}
2999
7d5753f0
AD
3000static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3001 int vfn)
3002{
3003 struct e1000_hw *hw = &adapter->hw;
3004 u32 vmolr;
3005
3006 /* if it isn't the PF, check to see if VFs are enabled and
3007 * increase the size to support vlan tags */
3008 if (vfn < adapter->vfs_allocated_count &&
3009 adapter->vf_data[vfn].vlans_enabled)
3010 size += VLAN_TAG_SIZE;
3011
3012 vmolr = rd32(E1000_VMOLR(vfn));
3013 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3014 vmolr |= size | E1000_VMOLR_LPE;
3015 wr32(E1000_VMOLR(vfn), vmolr);
3016
3017 return 0;
3018}
3019
e1739522
AD
3020/**
3021 * igb_rlpml_set - set maximum receive packet size
3022 * @adapter: board private structure
3023 *
3024 * Configure maximum receivable packet size.
3025 **/
3026static void igb_rlpml_set(struct igb_adapter *adapter)
3027{
153285f9 3028 u32 max_frame_size = adapter->max_frame_size;
e1739522
AD
3029 struct e1000_hw *hw = &adapter->hw;
3030 u16 pf_id = adapter->vfs_allocated_count;
3031
e1739522
AD
3032 if (pf_id) {
3033 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
153285f9
AD
3034 /*
3035 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3036 * to our max jumbo frame size, in case we need to enable
3037 * jumbo frames on one of the rings later.
3038 * This will not pass over-length frames into the default
3039 * queue because it's gated by the VMOLR.RLPML.
3040 */
7d5753f0 3041 max_frame_size = MAX_JUMBO_FRAME_SIZE;
e1739522
AD
3042 }
3043
3044 wr32(E1000_RLPML, max_frame_size);
3045}
3046
8151d294
WM
3047static inline void igb_set_vmolr(struct igb_adapter *adapter,
3048 int vfn, bool aupe)
7d5753f0
AD
3049{
3050 struct e1000_hw *hw = &adapter->hw;
3051 u32 vmolr;
3052
3053 /*
3054 * This register exists only on 82576 and newer so if we are older then
3055 * we should exit and do nothing
3056 */
3057 if (hw->mac.type < e1000_82576)
3058 return;
3059
3060 vmolr = rd32(E1000_VMOLR(vfn));
8151d294
WM
3061 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3062 if (aupe)
3063 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3064 else
3065 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
7d5753f0
AD
3066
3067 /* clear all bits that might not be set */
3068 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3069
a99955fc 3070 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
7d5753f0
AD
3071 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3072 /*
3073 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3074 * multicast packets
3075 */
3076 if (vfn <= adapter->vfs_allocated_count)
3077 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3078
3079 wr32(E1000_VMOLR(vfn), vmolr);
3080}
3081
85b430b4
AD
3082/**
3083 * igb_configure_rx_ring - Configure a receive ring after Reset
3084 * @adapter: board private structure
3085 * @ring: receive ring to be configured
3086 *
3087 * Configure the Rx unit of the MAC after a reset.
3088 **/
d7ee5b3a
AD
3089void igb_configure_rx_ring(struct igb_adapter *adapter,
3090 struct igb_ring *ring)
85b430b4
AD
3091{
3092 struct e1000_hw *hw = &adapter->hw;
3093 u64 rdba = ring->dma;
3094 int reg_idx = ring->reg_idx;
a74420e0 3095 u32 srrctl = 0, rxdctl = 0;
85b430b4
AD
3096
3097 /* disable the queue */
a74420e0 3098 wr32(E1000_RXDCTL(reg_idx), 0);
85b430b4
AD
3099
3100 /* Set DMA base address registers */
3101 wr32(E1000_RDBAL(reg_idx),
3102 rdba & 0x00000000ffffffffULL);
3103 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3104 wr32(E1000_RDLEN(reg_idx),
3105 ring->count * sizeof(union e1000_adv_rx_desc));
3106
3107 /* initialize head and tail */
fce99e34 3108 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
a74420e0 3109 wr32(E1000_RDH(reg_idx), 0);
fce99e34 3110 writel(0, ring->tail);
85b430b4 3111
952f72a8 3112 /* set descriptor configuration */
44390ca6 3113 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
952f72a8 3114#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
44390ca6 3115 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
952f72a8 3116#else
44390ca6 3117 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
952f72a8 3118#endif
44390ca6 3119 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3c89f6d0 3120#ifdef CONFIG_IGB_PTP
06218a8d 3121 if (hw->mac.type >= e1000_82580)
757b77e2 3122 srrctl |= E1000_SRRCTL_TIMESTAMP;
3c89f6d0 3123#endif /* CONFIG_IGB_PTP */
e6bdb6fe
NN
3124 /* Only set Drop Enable if we are supporting multiple queues */
3125 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3126 srrctl |= E1000_SRRCTL_DROP_EN;
952f72a8
AD
3127
3128 wr32(E1000_SRRCTL(reg_idx), srrctl);
3129
7d5753f0 3130 /* set filtering for VMDQ pools */
8151d294 3131 igb_set_vmolr(adapter, reg_idx & 0x7, true);
7d5753f0 3132
85b430b4
AD
3133 rxdctl |= IGB_RX_PTHRESH;
3134 rxdctl |= IGB_RX_HTHRESH << 8;
3135 rxdctl |= IGB_RX_WTHRESH << 16;
a74420e0
AD
3136
3137 /* enable receive descriptor fetching */
3138 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
85b430b4
AD
3139 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3140}
3141
9d5c8243
AK
3142/**
3143 * igb_configure_rx - Configure receive Unit after Reset
3144 * @adapter: board private structure
3145 *
3146 * Configure the Rx unit of the MAC after a reset.
3147 **/
3148static void igb_configure_rx(struct igb_adapter *adapter)
3149{
9107584e 3150 int i;
9d5c8243 3151
68d480c4
AD
3152 /* set UTA to appropriate mode */
3153 igb_set_uta(adapter);
3154
26ad9178
AD
3155 /* set the correct pool for the PF default MAC address in entry 0 */
3156 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3157 adapter->vfs_allocated_count);
3158
06cf2666
AD
3159 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3160 * the Base and Length of the Rx Descriptor Ring */
3161 for (i = 0; i < adapter->num_rx_queues; i++)
3025a446 3162 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
9d5c8243
AK
3163}
3164
3165/**
3166 * igb_free_tx_resources - Free Tx Resources per Queue
9d5c8243
AK
3167 * @tx_ring: Tx descriptor ring for a specific queue
3168 *
3169 * Free all transmit software resources
3170 **/
68fd9910 3171void igb_free_tx_resources(struct igb_ring *tx_ring)
9d5c8243 3172{
3b644cf6 3173 igb_clean_tx_ring(tx_ring);
9d5c8243 3174
06034649
AD
3175 vfree(tx_ring->tx_buffer_info);
3176 tx_ring->tx_buffer_info = NULL;
9d5c8243 3177
439705e1
AD
3178 /* if not set, then don't free */
3179 if (!tx_ring->desc)
3180 return;
3181
59d71989
AD
3182 dma_free_coherent(tx_ring->dev, tx_ring->size,
3183 tx_ring->desc, tx_ring->dma);
9d5c8243
AK
3184
3185 tx_ring->desc = NULL;
3186}
3187
3188/**
3189 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3190 * @adapter: board private structure
3191 *
3192 * Free all transmit software resources
3193 **/
3194static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3195{
3196 int i;
3197
3198 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 3199 igb_free_tx_resources(adapter->tx_ring[i]);
9d5c8243
AK
3200}
3201
ebe42d16
AD
3202void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3203 struct igb_tx_buffer *tx_buffer)
3204{
3205 if (tx_buffer->skb) {
3206 dev_kfree_skb_any(tx_buffer->skb);
3207 if (tx_buffer->dma)
3208 dma_unmap_single(ring->dev,
3209 tx_buffer->dma,
3210 tx_buffer->length,
3211 DMA_TO_DEVICE);
3212 } else if (tx_buffer->dma) {
3213 dma_unmap_page(ring->dev,
3214 tx_buffer->dma,
3215 tx_buffer->length,
3216 DMA_TO_DEVICE);
3217 }
3218 tx_buffer->next_to_watch = NULL;
3219 tx_buffer->skb = NULL;
3220 tx_buffer->dma = 0;
3221 /* buffer_info must be completely set up in the transmit path */
9d5c8243
AK
3222}
3223
3224/**
3225 * igb_clean_tx_ring - Free Tx Buffers
9d5c8243
AK
3226 * @tx_ring: ring to be cleaned
3227 **/
3b644cf6 3228static void igb_clean_tx_ring(struct igb_ring *tx_ring)
9d5c8243 3229{
06034649 3230 struct igb_tx_buffer *buffer_info;
9d5c8243 3231 unsigned long size;
6ad4edfc 3232 u16 i;
9d5c8243 3233
06034649 3234 if (!tx_ring->tx_buffer_info)
9d5c8243
AK
3235 return;
3236 /* Free all the Tx ring sk_buffs */
3237
3238 for (i = 0; i < tx_ring->count; i++) {
06034649 3239 buffer_info = &tx_ring->tx_buffer_info[i];
80785298 3240 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
9d5c8243
AK
3241 }
3242
dad8a3b3
JF
3243 netdev_tx_reset_queue(txring_txq(tx_ring));
3244
06034649
AD
3245 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3246 memset(tx_ring->tx_buffer_info, 0, size);
9d5c8243
AK
3247
3248 /* Zero out the descriptor ring */
9d5c8243
AK
3249 memset(tx_ring->desc, 0, tx_ring->size);
3250
3251 tx_ring->next_to_use = 0;
3252 tx_ring->next_to_clean = 0;
9d5c8243
AK
3253}
3254
3255/**
3256 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3257 * @adapter: board private structure
3258 **/
3259static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3260{
3261 int i;
3262
3263 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 3264 igb_clean_tx_ring(adapter->tx_ring[i]);
9d5c8243
AK
3265}
3266
3267/**
3268 * igb_free_rx_resources - Free Rx Resources
9d5c8243
AK
3269 * @rx_ring: ring to clean the resources from
3270 *
3271 * Free all receive software resources
3272 **/
68fd9910 3273void igb_free_rx_resources(struct igb_ring *rx_ring)
9d5c8243 3274{
3b644cf6 3275 igb_clean_rx_ring(rx_ring);
9d5c8243 3276
06034649
AD
3277 vfree(rx_ring->rx_buffer_info);
3278 rx_ring->rx_buffer_info = NULL;
9d5c8243 3279
439705e1
AD
3280 /* if not set, then don't free */
3281 if (!rx_ring->desc)
3282 return;
3283
59d71989
AD
3284 dma_free_coherent(rx_ring->dev, rx_ring->size,
3285 rx_ring->desc, rx_ring->dma);
9d5c8243
AK
3286
3287 rx_ring->desc = NULL;
3288}
3289
3290/**
3291 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3292 * @adapter: board private structure
3293 *
3294 * Free all receive software resources
3295 **/
3296static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3297{
3298 int i;
3299
3300 for (i = 0; i < adapter->num_rx_queues; i++)
3025a446 3301 igb_free_rx_resources(adapter->rx_ring[i]);
9d5c8243
AK
3302}
3303
3304/**
3305 * igb_clean_rx_ring - Free Rx Buffers per Queue
9d5c8243
AK
3306 * @rx_ring: ring to free buffers from
3307 **/
3b644cf6 3308static void igb_clean_rx_ring(struct igb_ring *rx_ring)
9d5c8243 3309{
9d5c8243 3310 unsigned long size;
c023cd88 3311 u16 i;
9d5c8243 3312
06034649 3313 if (!rx_ring->rx_buffer_info)
9d5c8243 3314 return;
439705e1 3315
9d5c8243
AK
3316 /* Free all the Rx ring sk_buffs */
3317 for (i = 0; i < rx_ring->count; i++) {
06034649 3318 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
9d5c8243 3319 if (buffer_info->dma) {
59d71989 3320 dma_unmap_single(rx_ring->dev,
80785298 3321 buffer_info->dma,
44390ca6 3322 IGB_RX_HDR_LEN,
59d71989 3323 DMA_FROM_DEVICE);
9d5c8243
AK
3324 buffer_info->dma = 0;
3325 }
3326
3327 if (buffer_info->skb) {
3328 dev_kfree_skb(buffer_info->skb);
3329 buffer_info->skb = NULL;
3330 }
6ec43fe6 3331 if (buffer_info->page_dma) {
59d71989 3332 dma_unmap_page(rx_ring->dev,
80785298 3333 buffer_info->page_dma,
6ec43fe6 3334 PAGE_SIZE / 2,
59d71989 3335 DMA_FROM_DEVICE);
6ec43fe6
AD
3336 buffer_info->page_dma = 0;
3337 }
9d5c8243 3338 if (buffer_info->page) {
9d5c8243
AK
3339 put_page(buffer_info->page);
3340 buffer_info->page = NULL;
bf36c1a0 3341 buffer_info->page_offset = 0;
9d5c8243
AK
3342 }
3343 }
3344
06034649
AD
3345 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3346 memset(rx_ring->rx_buffer_info, 0, size);
9d5c8243
AK
3347
3348 /* Zero out the descriptor ring */
3349 memset(rx_ring->desc, 0, rx_ring->size);
3350
3351 rx_ring->next_to_clean = 0;
3352 rx_ring->next_to_use = 0;
9d5c8243
AK
3353}
3354
3355/**
3356 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3357 * @adapter: board private structure
3358 **/
3359static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3360{
3361 int i;
3362
3363 for (i = 0; i < adapter->num_rx_queues; i++)
3025a446 3364 igb_clean_rx_ring(adapter->rx_ring[i]);
9d5c8243
AK
3365}
3366
3367/**
3368 * igb_set_mac - Change the Ethernet Address of the NIC
3369 * @netdev: network interface device structure
3370 * @p: pointer to an address structure
3371 *
3372 * Returns 0 on success, negative on failure
3373 **/
3374static int igb_set_mac(struct net_device *netdev, void *p)
3375{
3376 struct igb_adapter *adapter = netdev_priv(netdev);
28b0759c 3377 struct e1000_hw *hw = &adapter->hw;
9d5c8243
AK
3378 struct sockaddr *addr = p;
3379
3380 if (!is_valid_ether_addr(addr->sa_data))
3381 return -EADDRNOTAVAIL;
3382
3383 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
28b0759c 3384 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
9d5c8243 3385
26ad9178
AD
3386 /* set the correct pool for the new PF MAC address in entry 0 */
3387 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3388 adapter->vfs_allocated_count);
e1739522 3389
9d5c8243
AK
3390 return 0;
3391}
3392
3393/**
68d480c4 3394 * igb_write_mc_addr_list - write multicast addresses to MTA
9d5c8243
AK
3395 * @netdev: network interface device structure
3396 *
68d480c4
AD
3397 * Writes multicast address list to the MTA hash table.
3398 * Returns: -ENOMEM on failure
3399 * 0 on no addresses written
3400 * X on writing X addresses to MTA
9d5c8243 3401 **/
68d480c4 3402static int igb_write_mc_addr_list(struct net_device *netdev)
9d5c8243
AK
3403{
3404 struct igb_adapter *adapter = netdev_priv(netdev);
3405 struct e1000_hw *hw = &adapter->hw;
22bedad3 3406 struct netdev_hw_addr *ha;
68d480c4 3407 u8 *mta_list;
9d5c8243
AK
3408 int i;
3409
4cd24eaf 3410 if (netdev_mc_empty(netdev)) {
68d480c4
AD
3411 /* nothing to program, so clear mc list */
3412 igb_update_mc_addr_list(hw, NULL, 0);
3413 igb_restore_vf_multicasts(adapter);
3414 return 0;
3415 }
9d5c8243 3416
4cd24eaf 3417 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
68d480c4
AD
3418 if (!mta_list)
3419 return -ENOMEM;
ff41f8dc 3420
68d480c4 3421 /* The shared function expects a packed array of only addresses. */
48e2f183 3422 i = 0;
22bedad3
JP
3423 netdev_for_each_mc_addr(ha, netdev)
3424 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
68d480c4 3425
68d480c4
AD
3426 igb_update_mc_addr_list(hw, mta_list, i);
3427 kfree(mta_list);
3428
4cd24eaf 3429 return netdev_mc_count(netdev);
68d480c4
AD
3430}
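
/*
 * Illustrative sketch (not part of the driver): igb_write_mc_addr_list()
 * above hands the shared code a packed array of 6-byte MAC addresses with
 * no padding between entries.  A minimal model of that packing, assuming a
 * caller-supplied address table; the helper name and parameters are
 * hypothetical.
 */
static int igb_pack_mc_list(u8 *mta_list, const u8 addrs[][ETH_ALEN],
			    int count)
{
	int i;

	for (i = 0; i < count; i++)
		memcpy(mta_list + (i * ETH_ALEN), addrs[i], ETH_ALEN);

	return i; /* number of addresses packed, as the driver returns */
}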
3431
3432/**
3433 * igb_write_uc_addr_list - write unicast addresses to RAR table
3434 * @netdev: network interface device structure
3435 *
3436 * Writes unicast address list to the RAR table.
3437 * Returns: -ENOMEM on failure/insufficient address space
3438 * 0 on no addresses written
3439 * X on writing X addresses to the RAR table
3440 **/
3441static int igb_write_uc_addr_list(struct net_device *netdev)
3442{
3443 struct igb_adapter *adapter = netdev_priv(netdev);
3444 struct e1000_hw *hw = &adapter->hw;
3445 unsigned int vfn = adapter->vfs_allocated_count;
3446 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3447 int count = 0;
3448
3449 /* return ENOMEM indicating insufficient memory for addresses */
32e7bfc4 3450 if (netdev_uc_count(netdev) > rar_entries)
68d480c4 3451 return -ENOMEM;
9d5c8243 3452
32e7bfc4 3453 if (!netdev_uc_empty(netdev) && rar_entries) {
ff41f8dc 3454 struct netdev_hw_addr *ha;
32e7bfc4
JP
3455
3456 netdev_for_each_uc_addr(ha, netdev) {
ff41f8dc
AD
3457 if (!rar_entries)
3458 break;
26ad9178
AD
3459 igb_rar_set_qsel(adapter, ha->addr,
3460 rar_entries--,
68d480c4
AD
3461 vfn);
3462 count++;
ff41f8dc
AD
3463 }
3464 }
3465 /* write the addresses in reverse order to avoid write combining */
3466 for (; rar_entries > 0 ; rar_entries--) {
3467 wr32(E1000_RAH(rar_entries), 0);
3468 wr32(E1000_RAL(rar_entries), 0);
3469 }
3470 wrfl();
3471
68d480c4
AD
3472 return count;
3473}
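
/*
 * Illustrative sketch (not part of the driver): the RAR budget that
 * igb_write_uc_addr_list() above works against.  Entry 0 carries the PF
 * MAC and the adapter appears to hold back one entry per allocated VF, so
 * the receive-address filters left for additional unicast addresses come
 * out to rar_entry_count - (vfs_allocated_count + 1).  Hypothetical helper
 * with the same arithmetic.
 */
static unsigned int igb_uc_rar_entries(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;

	return hw->mac.rar_entry_count - (vfn + 1);
}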
3474
3475/**
3476 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3477 * @netdev: network interface device structure
3478 *
3479 * The set_rx_mode entry point is called whenever the unicast or multicast
3480 * address lists or the network interface flags are updated. This routine is
3481 * responsible for configuring the hardware for proper unicast, multicast,
3482 * promiscuous mode, and all-multi behavior.
3483 **/
3484static void igb_set_rx_mode(struct net_device *netdev)
3485{
3486 struct igb_adapter *adapter = netdev_priv(netdev);
3487 struct e1000_hw *hw = &adapter->hw;
3488 unsigned int vfn = adapter->vfs_allocated_count;
3489 u32 rctl, vmolr = 0;
3490 int count;
3491
3492 /* Check for Promiscuous and All Multicast modes */
3493 rctl = rd32(E1000_RCTL);
3494
 3495	/* clear the affected bits */
3496 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3497
3498 if (netdev->flags & IFF_PROMISC) {
3499 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3500 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
3501 } else {
3502 if (netdev->flags & IFF_ALLMULTI) {
3503 rctl |= E1000_RCTL_MPE;
3504 vmolr |= E1000_VMOLR_MPME;
3505 } else {
3506 /*
 3507		 * Write addresses to the MTA; if the attempt fails
25985edc 3508 * then we should just turn on promiscuous mode so
68d480c4
AD
3509 * that we can at least receive multicast traffic
3510 */
3511 count = igb_write_mc_addr_list(netdev);
3512 if (count < 0) {
3513 rctl |= E1000_RCTL_MPE;
3514 vmolr |= E1000_VMOLR_MPME;
3515 } else if (count) {
3516 vmolr |= E1000_VMOLR_ROMPE;
3517 }
3518 }
3519 /*
 3520		 * Write addresses to available RAR registers; if there is not
3521 * sufficient space to store all the addresses then enable
25985edc 3522 * unicast promiscuous mode
68d480c4
AD
3523 */
3524 count = igb_write_uc_addr_list(netdev);
3525 if (count < 0) {
3526 rctl |= E1000_RCTL_UPE;
3527 vmolr |= E1000_VMOLR_ROPE;
3528 }
3529 rctl |= E1000_RCTL_VFE;
28fc06f5 3530 }
68d480c4 3531 wr32(E1000_RCTL, rctl);
28fc06f5 3532
68d480c4
AD
3533 /*
3534 * In order to support SR-IOV and eventually VMDq it is necessary to set
3535 * the VMOLR to enable the appropriate modes. Without this workaround
3536 * we will have issues with VLAN tag stripping not being done for frames
3537 * that are only arriving because we are the default pool
3538 */
f96a8a0b 3539 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
28fc06f5 3540 return;
9d5c8243 3541
68d480c4
AD
3542 vmolr |= rd32(E1000_VMOLR(vfn)) &
3543 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3544 wr32(E1000_VMOLR(vfn), vmolr);
28fc06f5 3545 igb_restore_vf_multicasts(adapter);
9d5c8243
AK
3546}
3547
13800469
GR
3548static void igb_check_wvbr(struct igb_adapter *adapter)
3549{
3550 struct e1000_hw *hw = &adapter->hw;
3551 u32 wvbr = 0;
3552
3553 switch (hw->mac.type) {
3554 case e1000_82576:
3555 case e1000_i350:
3556 if (!(wvbr = rd32(E1000_WVBR)))
3557 return;
3558 break;
3559 default:
3560 break;
3561 }
3562
3563 adapter->wvbr |= wvbr;
3564}
3565
3566#define IGB_STAGGERED_QUEUE_OFFSET 8
3567
3568static void igb_spoof_check(struct igb_adapter *adapter)
3569{
3570 int j;
3571
3572 if (!adapter->wvbr)
3573 return;
3574
 3575	for (j = 0; j < adapter->vfs_allocated_count; j++) {
3576 if (adapter->wvbr & (1 << j) ||
3577 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3578 dev_warn(&adapter->pdev->dev,
3579 "Spoof event(s) detected on VF %d\n", j);
3580 adapter->wvbr &=
3581 ~((1 << j) |
3582 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3583 }
3584 }
3585}
3586
9d5c8243
AK
3587/* Need to wait a few seconds after link up to get diagnostic information from
3588 * the phy */
3589static void igb_update_phy_info(unsigned long data)
3590{
3591 struct igb_adapter *adapter = (struct igb_adapter *) data;
f5f4cf08 3592 igb_get_phy_info(&adapter->hw);
9d5c8243
AK
3593}
3594
4d6b725e
AD
3595/**
3596 * igb_has_link - check shared code for link and determine up/down
3597 * @adapter: pointer to driver private info
3598 **/
3145535a 3599bool igb_has_link(struct igb_adapter *adapter)
4d6b725e
AD
3600{
3601 struct e1000_hw *hw = &adapter->hw;
3602 bool link_active = false;
3603 s32 ret_val = 0;
3604
3605 /* get_link_status is set on LSC (link status) interrupt or
3606 * rx sequence error interrupt. get_link_status will stay
3607 * false until the e1000_check_for_link establishes link
3608 * for copper adapters ONLY
3609 */
3610 switch (hw->phy.media_type) {
3611 case e1000_media_type_copper:
3612 if (hw->mac.get_link_status) {
3613 ret_val = hw->mac.ops.check_for_link(hw);
3614 link_active = !hw->mac.get_link_status;
3615 } else {
3616 link_active = true;
3617 }
3618 break;
4d6b725e
AD
3619 case e1000_media_type_internal_serdes:
3620 ret_val = hw->mac.ops.check_for_link(hw);
3621 link_active = hw->mac.serdes_has_link;
3622 break;
3623 default:
3624 case e1000_media_type_unknown:
3625 break;
3626 }
3627
3628 return link_active;
3629}
3630
563988dc
SA
3631static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3632{
3633 bool ret = false;
3634 u32 ctrl_ext, thstat;
3635
f96a8a0b 3636 /* check for thermal sensor event on i350 copper only */
563988dc
SA
3637 if (hw->mac.type == e1000_i350) {
3638 thstat = rd32(E1000_THSTAT);
3639 ctrl_ext = rd32(E1000_CTRL_EXT);
3640
3641 if ((hw->phy.media_type == e1000_media_type_copper) &&
3642 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3643 ret = !!(thstat & event);
3644 }
3645 }
3646
3647 return ret;
3648}
3649
9d5c8243
AK
3650/**
3651 * igb_watchdog - Timer Call-back
3652 * @data: pointer to adapter cast into an unsigned long
3653 **/
3654static void igb_watchdog(unsigned long data)
3655{
3656 struct igb_adapter *adapter = (struct igb_adapter *)data;
3657 /* Do the rest outside of interrupt context */
3658 schedule_work(&adapter->watchdog_task);
3659}
3660
3661static void igb_watchdog_task(struct work_struct *work)
3662{
3663 struct igb_adapter *adapter = container_of(work,
559e9c49
AD
3664 struct igb_adapter,
3665 watchdog_task);
9d5c8243 3666 struct e1000_hw *hw = &adapter->hw;
9d5c8243 3667 struct net_device *netdev = adapter->netdev;
563988dc 3668 u32 link;
7a6ea550 3669 int i;
9d5c8243 3670
4d6b725e 3671 link = igb_has_link(adapter);
9d5c8243 3672 if (link) {
749ab2cd
YZ
3673 /* Cancel scheduled suspend requests. */
3674 pm_runtime_resume(netdev->dev.parent);
3675
9d5c8243
AK
3676 if (!netif_carrier_ok(netdev)) {
3677 u32 ctrl;
330a6d6a
AD
3678 hw->mac.ops.get_speed_and_duplex(hw,
3679 &adapter->link_speed,
3680 &adapter->link_duplex);
9d5c8243
AK
3681
3682 ctrl = rd32(E1000_CTRL);
527d47c1 3683			/* Link status message must follow this format */
876d2d6f
JK
3684 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
3685 "Duplex, Flow Control: %s\n",
559e9c49
AD
3686 netdev->name,
3687 adapter->link_speed,
3688 adapter->link_duplex == FULL_DUPLEX ?
876d2d6f
JK
3689 "Full" : "Half",
3690 (ctrl & E1000_CTRL_TFCE) &&
3691 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
3692 (ctrl & E1000_CTRL_RFCE) ? "RX" :
3693 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
9d5c8243 3694
563988dc 3695 /* check for thermal sensor event */
876d2d6f
JK
3696 if (igb_thermal_sensor_event(hw,
3697 E1000_THSTAT_LINK_THROTTLE)) {
3698 netdev_info(netdev, "The network adapter link "
3699 "speed was downshifted because it "
3700 "overheated\n");
7ef5ed1c 3701 }
563988dc 3702
d07f3e37 3703 /* adjust timeout factor according to speed/duplex */
9d5c8243
AK
3704 adapter->tx_timeout_factor = 1;
3705 switch (adapter->link_speed) {
3706 case SPEED_10:
9d5c8243
AK
3707 adapter->tx_timeout_factor = 14;
3708 break;
3709 case SPEED_100:
9d5c8243
AK
3710 /* maybe add some timeout factor ? */
3711 break;
3712 }
3713
3714 netif_carrier_on(netdev);
9d5c8243 3715
4ae196df 3716 igb_ping_all_vfs(adapter);
17dc566c 3717 igb_check_vf_rate_limit(adapter);
4ae196df 3718
4b1a9877 3719 /* link state has changed, schedule phy info update */
9d5c8243
AK
3720 if (!test_bit(__IGB_DOWN, &adapter->state))
3721 mod_timer(&adapter->phy_info_timer,
3722 round_jiffies(jiffies + 2 * HZ));
3723 }
3724 } else {
3725 if (netif_carrier_ok(netdev)) {
3726 adapter->link_speed = 0;
3727 adapter->link_duplex = 0;
563988dc
SA
3728
3729 /* check for thermal sensor event */
876d2d6f
JK
3730 if (igb_thermal_sensor_event(hw,
3731 E1000_THSTAT_PWR_DOWN)) {
3732 netdev_err(netdev, "The network adapter was "
3733 "stopped because it overheated\n");
7ef5ed1c 3734 }
563988dc 3735
527d47c1
AD
 3736			/* Link status message must follow this format */
3737 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3738 netdev->name);
9d5c8243 3739 netif_carrier_off(netdev);
4b1a9877 3740
4ae196df
AD
3741 igb_ping_all_vfs(adapter);
3742
4b1a9877 3743 /* link state has changed, schedule phy info update */
9d5c8243
AK
3744 if (!test_bit(__IGB_DOWN, &adapter->state))
3745 mod_timer(&adapter->phy_info_timer,
3746 round_jiffies(jiffies + 2 * HZ));
749ab2cd
YZ
3747
3748 pm_schedule_suspend(netdev->dev.parent,
3749 MSEC_PER_SEC * 5);
9d5c8243
AK
3750 }
3751 }
3752
12dcd86b
ED
3753 spin_lock(&adapter->stats64_lock);
3754 igb_update_stats(adapter, &adapter->stats64);
3755 spin_unlock(&adapter->stats64_lock);
9d5c8243 3756
dbabb065 3757 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 3758 struct igb_ring *tx_ring = adapter->tx_ring[i];
dbabb065 3759 if (!netif_carrier_ok(netdev)) {
9d5c8243
AK
3760 /* We've lost link, so the controller stops DMA,
3761 * but we've got queued Tx work that's never going
3762 * to get done, so reset controller to flush Tx.
3763 * (Do the reset outside of interrupt context). */
dbabb065
AD
3764 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3765 adapter->tx_timeout_count++;
3766 schedule_work(&adapter->reset_task);
3767 /* return immediately since reset is imminent */
3768 return;
3769 }
9d5c8243 3770 }
9d5c8243 3771
dbabb065 3772 /* Force detection of hung controller every watchdog period */
6d095fa8 3773 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
dbabb065 3774 }
f7ba205e 3775
9d5c8243 3776 /* Cause software interrupt to ensure rx ring is cleaned */
7a6ea550 3777 if (adapter->msix_entries) {
047e0030 3778 u32 eics = 0;
0d1ae7f4
AD
3779 for (i = 0; i < adapter->num_q_vectors; i++)
3780 eics |= adapter->q_vector[i]->eims_value;
7a6ea550
AD
3781 wr32(E1000_EICS, eics);
3782 } else {
3783 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3784 }
9d5c8243 3785
13800469
GR
3786 igb_spoof_check(adapter);
3787
9d5c8243
AK
3788 /* Reset the timer */
3789 if (!test_bit(__IGB_DOWN, &adapter->state))
3790 mod_timer(&adapter->watchdog_timer,
3791 round_jiffies(jiffies + 2 * HZ));
3792}
3793
3794enum latency_range {
3795 lowest_latency = 0,
3796 low_latency = 1,
3797 bulk_latency = 2,
3798 latency_invalid = 255
3799};
3800
6eb5a7f1
AD
3801/**
3802 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3803 *
 3804 * Stores a new ITR value based strictly on packet size. This
3805 * algorithm is less sophisticated than that used in igb_update_itr,
3806 * due to the difficulty of synchronizing statistics across multiple
eef35c2d 3807 * receive rings. The divisors and thresholds used by this function
6eb5a7f1
AD
3808 * were determined based on theoretical maximum wire speed and testing
3809 * data, in order to minimize response time while increasing bulk
3810 * throughput.
3811 * This functionality is controlled by the InterruptThrottleRate module
3812 * parameter (see igb_param.c)
3813 * NOTE: This function is called only when operating in a multiqueue
3814 * receive environment.
047e0030 3815 * @q_vector: pointer to q_vector
6eb5a7f1 3816 **/
047e0030 3817static void igb_update_ring_itr(struct igb_q_vector *q_vector)
9d5c8243 3818{
047e0030 3819 int new_val = q_vector->itr_val;
6eb5a7f1 3820 int avg_wire_size = 0;
047e0030 3821 struct igb_adapter *adapter = q_vector->adapter;
12dcd86b 3822 unsigned int packets;
9d5c8243 3823
6eb5a7f1
AD
3824 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3825 * ints/sec - ITR timer value of 120 ticks.
3826 */
3827 if (adapter->link_speed != SPEED_1000) {
0ba82994 3828 new_val = IGB_4K_ITR;
6eb5a7f1 3829 goto set_itr_val;
9d5c8243 3830 }
047e0030 3831
0ba82994
AD
3832 packets = q_vector->rx.total_packets;
3833 if (packets)
3834 avg_wire_size = q_vector->rx.total_bytes / packets;
047e0030 3835
0ba82994
AD
3836 packets = q_vector->tx.total_packets;
3837 if (packets)
3838 avg_wire_size = max_t(u32, avg_wire_size,
3839 q_vector->tx.total_bytes / packets);
047e0030
AD
3840
3841 /* if avg_wire_size isn't set no work was done */
3842 if (!avg_wire_size)
3843 goto clear_counts;
9d5c8243 3844
6eb5a7f1
AD
3845 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3846 avg_wire_size += 24;
3847
3848 /* Don't starve jumbo frames */
3849 avg_wire_size = min(avg_wire_size, 3000);
9d5c8243 3850
6eb5a7f1
AD
3851 /* Give a little boost to mid-size frames */
3852 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3853 new_val = avg_wire_size / 3;
3854 else
3855 new_val = avg_wire_size / 2;
9d5c8243 3856
0ba82994
AD
3857 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3858 if (new_val < IGB_20K_ITR &&
3859 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3860 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3861 new_val = IGB_20K_ITR;
abe1c363 3862
6eb5a7f1 3863set_itr_val:
047e0030
AD
3864 if (new_val != q_vector->itr_val) {
3865 q_vector->itr_val = new_val;
3866 q_vector->set_itr = 1;
9d5c8243 3867 }
6eb5a7f1 3868clear_counts:
0ba82994
AD
3869 q_vector->rx.total_bytes = 0;
3870 q_vector->rx.total_packets = 0;
3871 q_vector->tx.total_bytes = 0;
3872 q_vector->tx.total_packets = 0;
9d5c8243
AK
3873}
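
/*
 * Illustrative sketch (not part of the driver): the avg_wire_size to ITR
 * mapping that igb_update_ring_itr() above applies once it has a non-zero
 * average.  Constants mirror the code: 24 bytes of framing overhead, a
 * 3000 byte cap so jumbo frames are not starved, and a divide-by-3 boost
 * for mid-size frames.  Hypothetical helper name.
 */
static int igb_itr_from_wire_size(int avg_wire_size)
{
	avg_wire_size += 24;			/* CRC, preamble and IPG */
	avg_wire_size = min(avg_wire_size, 3000);

	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		return avg_wire_size / 3;	/* boost mid-size frames */

	return avg_wire_size / 2;
}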
3874
3875/**
3876 * igb_update_itr - update the dynamic ITR value based on statistics
3877 * Stores a new ITR value based on packets and byte
3878 * counts during the last interrupt. The advantage of per interrupt
3879 * computation is faster updates and more accurate ITR for the current
3880 * traffic pattern. Constants in this function were computed
3881 * based on theoretical maximum wire speed and thresholds were set based
3882 * on testing data as well as attempting to minimize response time
3883 * while increasing bulk throughput.
3884 * this functionality is controlled by the InterruptThrottleRate module
3885 * parameter (see igb_param.c)
3886 * NOTE: These calculations are only valid when operating in a single-
3887 * queue environment.
0ba82994
AD
3888 * @q_vector: pointer to q_vector
3889 * @ring_container: ring info to update the itr for
9d5c8243 3890 **/
0ba82994
AD
3891static void igb_update_itr(struct igb_q_vector *q_vector,
3892 struct igb_ring_container *ring_container)
9d5c8243 3893{
0ba82994
AD
3894 unsigned int packets = ring_container->total_packets;
3895 unsigned int bytes = ring_container->total_bytes;
3896 u8 itrval = ring_container->itr;
9d5c8243 3897
0ba82994 3898 /* no packets, exit with status unchanged */
9d5c8243 3899 if (packets == 0)
0ba82994 3900 return;
9d5c8243 3901
0ba82994 3902 switch (itrval) {
9d5c8243
AK
3903 case lowest_latency:
3904 /* handle TSO and jumbo frames */
3905 if (bytes/packets > 8000)
0ba82994 3906 itrval = bulk_latency;
9d5c8243 3907 else if ((packets < 5) && (bytes > 512))
0ba82994 3908 itrval = low_latency;
9d5c8243
AK
3909 break;
3910 case low_latency: /* 50 usec aka 20000 ints/s */
3911 if (bytes > 10000) {
3912 /* this if handles the TSO accounting */
3913 if (bytes/packets > 8000) {
0ba82994 3914 itrval = bulk_latency;
9d5c8243 3915 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
0ba82994 3916 itrval = bulk_latency;
9d5c8243 3917 } else if ((packets > 35)) {
0ba82994 3918 itrval = lowest_latency;
9d5c8243
AK
3919 }
3920 } else if (bytes/packets > 2000) {
0ba82994 3921 itrval = bulk_latency;
9d5c8243 3922 } else if (packets <= 2 && bytes < 512) {
0ba82994 3923 itrval = lowest_latency;
9d5c8243
AK
3924 }
3925 break;
3926 case bulk_latency: /* 250 usec aka 4000 ints/s */
3927 if (bytes > 25000) {
3928 if (packets > 35)
0ba82994 3929 itrval = low_latency;
1e5c3d21 3930 } else if (bytes < 1500) {
0ba82994 3931 itrval = low_latency;
9d5c8243
AK
3932 }
3933 break;
3934 }
3935
0ba82994
AD
3936 /* clear work counters since we have the values we need */
3937 ring_container->total_bytes = 0;
3938 ring_container->total_packets = 0;
3939
3940 /* write updated itr to ring container */
3941 ring_container->itr = itrval;
9d5c8243
AK
3942}
3943
0ba82994 3944static void igb_set_itr(struct igb_q_vector *q_vector)
9d5c8243 3945{
0ba82994 3946 struct igb_adapter *adapter = q_vector->adapter;
047e0030 3947 u32 new_itr = q_vector->itr_val;
0ba82994 3948 u8 current_itr = 0;
9d5c8243
AK
3949
3950 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3951 if (adapter->link_speed != SPEED_1000) {
3952 current_itr = 0;
0ba82994 3953 new_itr = IGB_4K_ITR;
9d5c8243
AK
3954 goto set_itr_now;
3955 }
3956
0ba82994
AD
3957 igb_update_itr(q_vector, &q_vector->tx);
3958 igb_update_itr(q_vector, &q_vector->rx);
9d5c8243 3959
0ba82994 3960 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
9d5c8243 3961
6eb5a7f1 3962 /* conservative mode (itr 3) eliminates the lowest_latency setting */
0ba82994
AD
3963 if (current_itr == lowest_latency &&
3964 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3965 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
6eb5a7f1
AD
3966 current_itr = low_latency;
3967
9d5c8243
AK
3968 switch (current_itr) {
3969 /* counts and packets in update_itr are dependent on these numbers */
3970 case lowest_latency:
0ba82994 3971 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
9d5c8243
AK
3972 break;
3973 case low_latency:
0ba82994 3974 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
9d5c8243
AK
3975 break;
3976 case bulk_latency:
0ba82994 3977 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
9d5c8243
AK
3978 break;
3979 default:
3980 break;
3981 }
3982
3983set_itr_now:
047e0030 3984 if (new_itr != q_vector->itr_val) {
9d5c8243
AK
3985 /* this attempts to bias the interrupt rate towards Bulk
3986 * by adding intermediate steps when interrupt rate is
3987 * increasing */
047e0030
AD
3988 new_itr = new_itr > q_vector->itr_val ?
3989 max((new_itr * q_vector->itr_val) /
3990 (new_itr + (q_vector->itr_val >> 2)),
0ba82994 3991 new_itr) :
9d5c8243
AK
3992 new_itr;
3993 /* Don't write the value here; it resets the adapter's
3994 * internal timer, and causes us to delay far longer than
3995 * we should between interrupts. Instead, we write the ITR
3996 * value at the beginning of the next interrupt so the timing
3997 * ends up being correct.
3998 */
047e0030
AD
3999 q_vector->itr_val = new_itr;
4000 q_vector->set_itr = 1;
9d5c8243 4001 }
9d5c8243
AK
4002}
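
/*
 * Illustrative sketch (not part of the driver): the stepping expression at
 * set_itr_now above, pulled out as a stand-alone helper.  Per the comment
 * in the code it is meant to move toward larger ITR values (lower
 * interrupt rates) in intermediate steps rather than in one jump.
 * Hypothetical helper name; arithmetic copied from the code above.
 */
static u32 igb_itr_step(u32 new_itr, u32 cur_itr)
{
	if (new_itr <= cur_itr)
		return new_itr;

	return max((new_itr * cur_itr) / (new_itr + (cur_itr >> 2)),
		   new_itr);
}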
4003
c50b52a0
SH
4004static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4005 u32 type_tucmd, u32 mss_l4len_idx)
7d13a7d0
AD
4006{
4007 struct e1000_adv_tx_context_desc *context_desc;
4008 u16 i = tx_ring->next_to_use;
4009
4010 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4011
4012 i++;
4013 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4014
4015 /* set bits to identify this as an advanced context descriptor */
4016 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4017
4018 /* For 82575, context index must be unique per ring. */
866cff06 4019 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
7d13a7d0
AD
4020 mss_l4len_idx |= tx_ring->reg_idx << 4;
4021
4022 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4023 context_desc->seqnum_seed = 0;
4024 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4025 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4026}
4027
7af40ad9
AD
4028static int igb_tso(struct igb_ring *tx_ring,
4029 struct igb_tx_buffer *first,
4030 u8 *hdr_len)
9d5c8243 4031{
7af40ad9 4032 struct sk_buff *skb = first->skb;
7d13a7d0
AD
4033 u32 vlan_macip_lens, type_tucmd;
4034 u32 mss_l4len_idx, l4len;
4035
4036 if (!skb_is_gso(skb))
4037 return 0;
9d5c8243
AK
4038
4039 if (skb_header_cloned(skb)) {
7af40ad9 4040 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
9d5c8243
AK
4041 if (err)
4042 return err;
4043 }
4044
7d13a7d0
AD
4045 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4046 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
9d5c8243 4047
7af40ad9 4048 if (first->protocol == __constant_htons(ETH_P_IP)) {
9d5c8243
AK
4049 struct iphdr *iph = ip_hdr(skb);
4050 iph->tot_len = 0;
4051 iph->check = 0;
4052 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4053 iph->daddr, 0,
4054 IPPROTO_TCP,
4055 0);
7d13a7d0 4056 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
7af40ad9
AD
4057 first->tx_flags |= IGB_TX_FLAGS_TSO |
4058 IGB_TX_FLAGS_CSUM |
4059 IGB_TX_FLAGS_IPV4;
8e1e8a47 4060 } else if (skb_is_gso_v6(skb)) {
9d5c8243
AK
4061 ipv6_hdr(skb)->payload_len = 0;
4062 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4063 &ipv6_hdr(skb)->daddr,
4064 0, IPPROTO_TCP, 0);
7af40ad9
AD
4065 first->tx_flags |= IGB_TX_FLAGS_TSO |
4066 IGB_TX_FLAGS_CSUM;
9d5c8243
AK
4067 }
4068
7af40ad9 4069 /* compute header lengths */
7d13a7d0
AD
4070 l4len = tcp_hdrlen(skb);
4071 *hdr_len = skb_transport_offset(skb) + l4len;
9d5c8243 4072
7af40ad9
AD
4073 /* update gso size and bytecount with header size */
4074 first->gso_segs = skb_shinfo(skb)->gso_segs;
4075 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4076
9d5c8243 4077 /* MSS L4LEN IDX */
7d13a7d0
AD
4078 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4079 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
9d5c8243 4080
7d13a7d0
AD
4081 /* VLAN MACLEN IPLEN */
4082 vlan_macip_lens = skb_network_header_len(skb);
4083 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
7af40ad9 4084 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
9d5c8243 4085
7d13a7d0 4086 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
9d5c8243 4087
7d13a7d0 4088 return 1;
9d5c8243
AK
4089}
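
/*
 * Illustrative sketch (not part of the driver): the gso accounting at the
 * end of igb_tso() above.  first->bytecount starts at skb->len and grows
 * by one extra header per additional segment, since the hardware
 * replicates the header for every segment it cuts.  For example, a
 * 3-segment TSO with a 66 byte header adds (3 - 1) * 66 = 132 bytes.
 * Hypothetical helper with the same arithmetic.
 */
static unsigned int igb_tso_bytecount(unsigned int skb_len,
				      u16 gso_segs, u8 hdr_len)
{
	return skb_len + (gso_segs - 1) * hdr_len;
}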
4090
7af40ad9 4091static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
9d5c8243 4092{
7af40ad9 4093 struct sk_buff *skb = first->skb;
7d13a7d0
AD
4094 u32 vlan_macip_lens = 0;
4095 u32 mss_l4len_idx = 0;
4096 u32 type_tucmd = 0;
9d5c8243 4097
7d13a7d0 4098 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7af40ad9
AD
4099 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4100 return;
7d13a7d0
AD
4101 } else {
4102 u8 l4_hdr = 0;
7af40ad9 4103 switch (first->protocol) {
7d13a7d0
AD
4104 case __constant_htons(ETH_P_IP):
4105 vlan_macip_lens |= skb_network_header_len(skb);
4106 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4107 l4_hdr = ip_hdr(skb)->protocol;
4108 break;
4109 case __constant_htons(ETH_P_IPV6):
4110 vlan_macip_lens |= skb_network_header_len(skb);
4111 l4_hdr = ipv6_hdr(skb)->nexthdr;
4112 break;
4113 default:
4114 if (unlikely(net_ratelimit())) {
4115 dev_warn(tx_ring->dev,
4116 "partial checksum but proto=%x!\n",
7af40ad9 4117 first->protocol);
fa4a7ef3 4118 }
7d13a7d0
AD
4119 break;
4120 }
fa4a7ef3 4121
7d13a7d0
AD
4122 switch (l4_hdr) {
4123 case IPPROTO_TCP:
4124 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4125 mss_l4len_idx = tcp_hdrlen(skb) <<
4126 E1000_ADVTXD_L4LEN_SHIFT;
4127 break;
4128 case IPPROTO_SCTP:
4129 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4130 mss_l4len_idx = sizeof(struct sctphdr) <<
4131 E1000_ADVTXD_L4LEN_SHIFT;
4132 break;
4133 case IPPROTO_UDP:
4134 mss_l4len_idx = sizeof(struct udphdr) <<
4135 E1000_ADVTXD_L4LEN_SHIFT;
4136 break;
4137 default:
4138 if (unlikely(net_ratelimit())) {
4139 dev_warn(tx_ring->dev,
4140 "partial checksum but l4 proto=%x!\n",
4141 l4_hdr);
44b0cda3 4142 }
7d13a7d0 4143 break;
9d5c8243 4144 }
7af40ad9
AD
4145
4146 /* update TX checksum flag */
4147 first->tx_flags |= IGB_TX_FLAGS_CSUM;
7d13a7d0 4148 }
9d5c8243 4149
7d13a7d0 4150 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
7af40ad9 4151 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
9d5c8243 4152
7d13a7d0 4153 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
9d5c8243
AK
4154}
4155
e032afc8
AD
4156static __le32 igb_tx_cmd_type(u32 tx_flags)
4157{
4158 /* set type for advanced descriptor with frame checksum insertion */
4159 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4160 E1000_ADVTXD_DCMD_IFCS |
4161 E1000_ADVTXD_DCMD_DEXT);
4162
4163 /* set HW vlan bit if vlan is present */
4164 if (tx_flags & IGB_TX_FLAGS_VLAN)
4165 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4166
3c89f6d0 4167#ifdef CONFIG_IGB_PTP
e032afc8 4168 /* set timestamp bit if present */
1f6e8178 4169 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
e032afc8 4170 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
3c89f6d0 4171#endif /* CONFIG_IGB_PTP */
e032afc8
AD
4172
4173 /* set segmentation bits for TSO */
4174 if (tx_flags & IGB_TX_FLAGS_TSO)
4175 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4176
4177 return cmd_type;
4178}
4179
7af40ad9
AD
4180static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4181 union e1000_adv_tx_desc *tx_desc,
4182 u32 tx_flags, unsigned int paylen)
e032afc8
AD
4183{
4184 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4185
4186 /* 82575 requires a unique index per ring if any offload is enabled */
4187 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
866cff06 4188 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
e032afc8
AD
4189 olinfo_status |= tx_ring->reg_idx << 4;
4190
4191 /* insert L4 checksum */
4192 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4193 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4194
4195 /* insert IPv4 checksum */
4196 if (tx_flags & IGB_TX_FLAGS_IPV4)
4197 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4198 }
4199
7af40ad9 4200 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
e032afc8
AD
4201}
4202
ebe42d16
AD
4203/*
4204 * The largest size we can write to the descriptor is 65535. In order to
4205 * maintain a power of two alignment we have to limit ourselves to 32K.
4206 */
4207#define IGB_MAX_TXD_PWR 15
7af40ad9 4208#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
9d5c8243 4209
7af40ad9
AD
4210static void igb_tx_map(struct igb_ring *tx_ring,
4211 struct igb_tx_buffer *first,
ebe42d16 4212 const u8 hdr_len)
9d5c8243 4213{
7af40ad9 4214 struct sk_buff *skb = first->skb;
ebe42d16
AD
4215 struct igb_tx_buffer *tx_buffer_info;
4216 union e1000_adv_tx_desc *tx_desc;
4217 dma_addr_t dma;
4218 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4219 unsigned int data_len = skb->data_len;
4220 unsigned int size = skb_headlen(skb);
4221 unsigned int paylen = skb->len - hdr_len;
4222 __le32 cmd_type;
7af40ad9 4223 u32 tx_flags = first->tx_flags;
ebe42d16 4224 u16 i = tx_ring->next_to_use;
ebe42d16
AD
4225
4226 tx_desc = IGB_TX_DESC(tx_ring, i);
4227
7af40ad9 4228 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
ebe42d16
AD
4229 cmd_type = igb_tx_cmd_type(tx_flags);
4230
4231 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4232 if (dma_mapping_error(tx_ring->dev, dma))
6366ad33 4233 goto dma_error;
9d5c8243 4234
ebe42d16
AD
4235 /* record length, and DMA address */
4236 first->length = size;
4237 first->dma = dma;
ebe42d16
AD
4238 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4239
4240 for (;;) {
4241 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4242 tx_desc->read.cmd_type_len =
4243 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
4244
4245 i++;
4246 tx_desc++;
4247 if (i == tx_ring->count) {
4248 tx_desc = IGB_TX_DESC(tx_ring, 0);
4249 i = 0;
4250 }
4251
4252 dma += IGB_MAX_DATA_PER_TXD;
4253 size -= IGB_MAX_DATA_PER_TXD;
4254
4255 tx_desc->read.olinfo_status = 0;
4256 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4257 }
4258
4259 if (likely(!data_len))
4260 break;
2bbfebe2 4261
ebe42d16 4262 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
9d5c8243 4263
65689fef 4264 i++;
ebe42d16
AD
4265 tx_desc++;
4266 if (i == tx_ring->count) {
4267 tx_desc = IGB_TX_DESC(tx_ring, 0);
65689fef 4268 i = 0;
ebe42d16 4269 }
65689fef 4270
9e903e08 4271 size = skb_frag_size(frag);
ebe42d16
AD
4272 data_len -= size;
4273
4274 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4275 size, DMA_TO_DEVICE);
4276 if (dma_mapping_error(tx_ring->dev, dma))
6366ad33
AD
4277 goto dma_error;
4278
ebe42d16
AD
4279 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4280 tx_buffer_info->length = size;
4281 tx_buffer_info->dma = dma;
4282
4283 tx_desc->read.olinfo_status = 0;
4284 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4285
4286 frag++;
9d5c8243
AK
4287 }
4288
bdbc0631
ED
4289 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4290
ebe42d16
AD
4291 /* write last descriptor with RS and EOP bits */
4292 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
6b8f0922
BG
4293 if (unlikely(skb->no_fcs))
4294 cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
ebe42d16 4295 tx_desc->read.cmd_type_len = cmd_type;
8542db05
AD
4296
4297 /* set the timestamp */
4298 first->time_stamp = jiffies;
4299
ebe42d16
AD
4300 /*
4301 * Force memory writes to complete before letting h/w know there
4302 * are new descriptors to fetch. (Only applicable for weak-ordered
4303 * memory model archs, such as IA-64).
4304 *
4305 * We also need this memory barrier to make certain all of the
4306 * status bits have been updated before next_to_watch is written.
4307 */
4308 wmb();
4309
8542db05 4310 /* set next_to_watch value indicating a packet is present */
ebe42d16 4311 first->next_to_watch = tx_desc;
9d5c8243 4312
ebe42d16
AD
4313 i++;
4314 if (i == tx_ring->count)
4315 i = 0;
6366ad33 4316
ebe42d16 4317 tx_ring->next_to_use = i;
6366ad33 4318
ebe42d16 4319 writel(i, tx_ring->tail);
6366ad33 4320
ebe42d16
AD
4321 /* we need this if more than one processor can write to our tail
 4322	 * at a time; it synchronizes IO on IA64/Altix systems */
4323 mmiowb();
4324
4325 return;
4326
4327dma_error:
4328 dev_err(tx_ring->dev, "TX DMA map failed\n");
4329
4330 /* clear dma mappings for failed tx_buffer_info map */
4331 for (;;) {
4332 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4333 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4334 if (tx_buffer_info == first)
4335 break;
a77ff709
NN
4336 if (i == 0)
4337 i = tx_ring->count;
6366ad33 4338 i--;
6366ad33
AD
4339 }
4340
9d5c8243 4341 tx_ring->next_to_use = i;
9d5c8243
AK
4342}
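
/*
 * Illustrative sketch (not part of the driver): how many data descriptors
 * the inner loop of igb_tx_map() above consumes for a single mapped
 * buffer, given the IGB_MAX_DATA_PER_TXD (32K) limit per descriptor.  A
 * 60K fragment, for instance, needs two descriptors.  Hypothetical helper
 * name.
 */
static u16 igb_txd_count(unsigned int size)
{
	u16 count = 0;

	while (size > IGB_MAX_DATA_PER_TXD) {
		size -= IGB_MAX_DATA_PER_TXD;
		count++;
	}

	/* whatever remains always occupies one more descriptor */
	return count + 1;
}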
4343
6ad4edfc 4344static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
9d5c8243 4345{
e694e964
AD
4346 struct net_device *netdev = tx_ring->netdev;
4347
661086df 4348 netif_stop_subqueue(netdev, tx_ring->queue_index);
661086df 4349
9d5c8243
AK
4350 /* Herbert's original patch had:
4351 * smp_mb__after_netif_stop_queue();
4352 * but since that doesn't exist yet, just open code it. */
4353 smp_mb();
4354
 4355	/* We need to check again in case another CPU has just
4356 * made room available. */
c493ea45 4357 if (igb_desc_unused(tx_ring) < size)
9d5c8243
AK
4358 return -EBUSY;
4359
4360 /* A reprieve! */
661086df 4361 netif_wake_subqueue(netdev, tx_ring->queue_index);
12dcd86b
ED
4362
4363 u64_stats_update_begin(&tx_ring->tx_syncp2);
4364 tx_ring->tx_stats.restart_queue2++;
4365 u64_stats_update_end(&tx_ring->tx_syncp2);
4366
9d5c8243
AK
4367 return 0;
4368}
4369
6ad4edfc 4370static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
9d5c8243 4371{
c493ea45 4372 if (igb_desc_unused(tx_ring) >= size)
9d5c8243 4373 return 0;
e694e964 4374 return __igb_maybe_stop_tx(tx_ring, size);
9d5c8243
AK
4375}
4376
cd392f5c
AD
4377netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4378 struct igb_ring *tx_ring)
9d5c8243 4379{
1f6e8178
MV
4380#ifdef CONFIG_IGB_PTP
4381 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4382#endif /* CONFIG_IGB_PTP */
8542db05 4383 struct igb_tx_buffer *first;
ebe42d16 4384 int tso;
91d4ee33 4385 u32 tx_flags = 0;
31f6adbb 4386 __be16 protocol = vlan_get_protocol(skb);
91d4ee33 4387 u8 hdr_len = 0;
9d5c8243 4388
9d5c8243
AK
4389 /* need: 1 descriptor per page,
4390 * + 2 desc gap to keep tail from touching head,
4391 * + 1 desc for skb->data,
4392 * + 1 desc for context descriptor,
4393 * otherwise try next time */
e694e964 4394 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
9d5c8243 4395 /* this is a hard error */
9d5c8243
AK
4396 return NETDEV_TX_BUSY;
4397 }
33af6bcc 4398
7af40ad9
AD
4399 /* record the location of the first descriptor for this packet */
4400 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4401 first->skb = skb;
4402 first->bytecount = skb->len;
4403 first->gso_segs = 1;
4404
3c89f6d0 4405#ifdef CONFIG_IGB_PTP
1f6e8178
MV
4406 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4407 !(adapter->ptp_tx_skb))) {
2244d07b 4408 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
33af6bcc 4409 tx_flags |= IGB_TX_FLAGS_TSTAMP;
1f6e8178
MV
4410
4411 adapter->ptp_tx_skb = skb_get(skb);
4412 if (adapter->hw.mac.type == e1000_82576)
4413 schedule_work(&adapter->ptp_tx_work);
33af6bcc 4414 }
3c89f6d0 4415#endif /* CONFIG_IGB_PTP */
9d5c8243 4416
eab6d18d 4417 if (vlan_tx_tag_present(skb)) {
9d5c8243
AK
4418 tx_flags |= IGB_TX_FLAGS_VLAN;
4419 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4420 }
4421
7af40ad9
AD
4422 /* record initial flags and protocol */
4423 first->tx_flags = tx_flags;
4424 first->protocol = protocol;
cdfd01fc 4425
7af40ad9
AD
4426 tso = igb_tso(tx_ring, first, &hdr_len);
4427 if (tso < 0)
7d13a7d0 4428 goto out_drop;
7af40ad9
AD
4429 else if (!tso)
4430 igb_tx_csum(tx_ring, first);
9d5c8243 4431
7af40ad9 4432 igb_tx_map(tx_ring, first, hdr_len);
85ad76b2
AD
4433
4434 /* Make sure there is space in the ring for the next send. */
e694e964 4435 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
85ad76b2 4436
9d5c8243 4437 return NETDEV_TX_OK;
7d13a7d0
AD
4438
4439out_drop:
7af40ad9
AD
4440 igb_unmap_and_free_tx_resource(tx_ring, first);
4441
7d13a7d0 4442 return NETDEV_TX_OK;
9d5c8243
AK
4443}
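
/*
 * Illustrative sketch (not part of the driver): the descriptor budget
 * checked at the top of igb_xmit_frame_ring() above before a frame is
 * accepted: one descriptor per page fragment, one for skb->data, one for
 * the context descriptor and a two descriptor gap so tail never touches
 * head.  Hypothetical helper.
 */
static u16 igb_desc_needed(struct sk_buff *skb)
{
	return skb_shinfo(skb)->nr_frags + 4;
}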
4444
1cc3bd87
AD
4445static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4446 struct sk_buff *skb)
4447{
4448 unsigned int r_idx = skb->queue_mapping;
4449
4450 if (r_idx >= adapter->num_tx_queues)
4451 r_idx = r_idx % adapter->num_tx_queues;
4452
4453 return adapter->tx_ring[r_idx];
4454}
4455
cd392f5c
AD
4456static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4457 struct net_device *netdev)
9d5c8243
AK
4458{
4459 struct igb_adapter *adapter = netdev_priv(netdev);
b1a436c3
AD
4460
4461 if (test_bit(__IGB_DOWN, &adapter->state)) {
4462 dev_kfree_skb_any(skb);
4463 return NETDEV_TX_OK;
4464 }
4465
4466 if (skb->len <= 0) {
4467 dev_kfree_skb_any(skb);
4468 return NETDEV_TX_OK;
4469 }
4470
1cc3bd87
AD
4471 /*
4472 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4473 * in order to meet this minimum size requirement.
4474 */
4475 if (skb->len < 17) {
4476 if (skb_padto(skb, 17))
4477 return NETDEV_TX_OK;
4478 skb->len = 17;
4479 }
9d5c8243 4480
1cc3bd87 4481 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
9d5c8243
AK
4482}
4483
4484/**
4485 * igb_tx_timeout - Respond to a Tx Hang
4486 * @netdev: network interface device structure
4487 **/
4488static void igb_tx_timeout(struct net_device *netdev)
4489{
4490 struct igb_adapter *adapter = netdev_priv(netdev);
4491 struct e1000_hw *hw = &adapter->hw;
4492
4493 /* Do the reset outside of interrupt context */
4494 adapter->tx_timeout_count++;
f7ba205e 4495
06218a8d 4496 if (hw->mac.type >= e1000_82580)
55cac248
AD
4497 hw->dev_spec._82575.global_device_reset = true;
4498
9d5c8243 4499 schedule_work(&adapter->reset_task);
265de409
AD
4500 wr32(E1000_EICS,
4501 (adapter->eims_enable_mask & ~adapter->eims_other));
9d5c8243
AK
4502}
4503
4504static void igb_reset_task(struct work_struct *work)
4505{
4506 struct igb_adapter *adapter;
4507 adapter = container_of(work, struct igb_adapter, reset_task);
4508
c97ec42a
TI
4509 igb_dump(adapter);
4510 netdev_err(adapter->netdev, "Reset adapter\n");
9d5c8243
AK
4511 igb_reinit_locked(adapter);
4512}
4513
4514/**
12dcd86b 4515 * igb_get_stats64 - Get System Network Statistics
9d5c8243 4516 * @netdev: network interface device structure
12dcd86b 4517 * @stats: rtnl_link_stats64 pointer
9d5c8243 4518 *
9d5c8243 4519 **/
12dcd86b
ED
4520static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4521 struct rtnl_link_stats64 *stats)
9d5c8243 4522{
12dcd86b
ED
4523 struct igb_adapter *adapter = netdev_priv(netdev);
4524
4525 spin_lock(&adapter->stats64_lock);
4526 igb_update_stats(adapter, &adapter->stats64);
4527 memcpy(stats, &adapter->stats64, sizeof(*stats));
4528 spin_unlock(&adapter->stats64_lock);
4529
4530 return stats;
9d5c8243
AK
4531}
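
/*
 * Illustrative sketch (not part of the driver): the fetch/retry pattern
 * that igb_update_stats() (called under stats64_lock above) uses to read
 * each ring's 64-bit byte and packet counters consistently, even on 32-bit
 * hosts where the writer may be updating them concurrently.  Hypothetical
 * helper reading a single Rx ring.
 */
static void igb_read_rx_ring_stats(struct igb_ring *ring,
				   u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
		*bytes = ring->rx_stats.bytes;
		*packets = ring->rx_stats.packets;
	} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
}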
4532
4533/**
4534 * igb_change_mtu - Change the Maximum Transfer Unit
4535 * @netdev: network interface device structure
4536 * @new_mtu: new value for maximum frame size
4537 *
4538 * Returns 0 on success, negative on failure
4539 **/
4540static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4541{
4542 struct igb_adapter *adapter = netdev_priv(netdev);
090b1795 4543 struct pci_dev *pdev = adapter->pdev;
153285f9 4544 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
9d5c8243 4545
c809d227 4546 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
090b1795 4547 dev_err(&pdev->dev, "Invalid MTU setting\n");
9d5c8243
AK
4548 return -EINVAL;
4549 }
4550
153285f9 4551#define MAX_STD_JUMBO_FRAME_SIZE 9238
9d5c8243 4552 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
090b1795 4553 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
9d5c8243
AK
4554 return -EINVAL;
4555 }
4556
4557 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4558 msleep(1);
73cd78f1 4559
9d5c8243
AK
4560 /* igb_down has a dependency on max_frame_size */
4561 adapter->max_frame_size = max_frame;
559e9c49 4562
4c844851
AD
4563 if (netif_running(netdev))
4564 igb_down(adapter);
9d5c8243 4565
090b1795 4566 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
9d5c8243
AK
4567 netdev->mtu, new_mtu);
4568 netdev->mtu = new_mtu;
4569
4570 if (netif_running(netdev))
4571 igb_up(adapter);
4572 else
4573 igb_reset(adapter);
4574
4575 clear_bit(__IGB_RESETTING, &adapter->state);
4576
4577 return 0;
4578}
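
/*
 * Illustrative sketch (not part of the driver): the max_frame computation
 * used by igb_change_mtu() above.  For the standard MTU of 1500 this gives
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes on
 * the wire.  Hypothetical helper.
 */
static int igb_mtu_to_max_frame(int new_mtu)
{
	return new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}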
4579
4580/**
4581 * igb_update_stats - Update the board statistics counters
4582 * @adapter: board private structure
4583 **/
4584
12dcd86b
ED
4585void igb_update_stats(struct igb_adapter *adapter,
4586 struct rtnl_link_stats64 *net_stats)
9d5c8243
AK
4587{
4588 struct e1000_hw *hw = &adapter->hw;
4589 struct pci_dev *pdev = adapter->pdev;
fa3d9a6d 4590 u32 reg, mpc;
9d5c8243 4591 u16 phy_tmp;
3f9c0164
AD
4592 int i;
4593 u64 bytes, packets;
12dcd86b
ED
4594 unsigned int start;
4595 u64 _bytes, _packets;
9d5c8243
AK
4596
4597#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4598
4599 /*
4600 * Prevent stats update while adapter is being reset, or if the pci
4601 * connection is down.
4602 */
4603 if (adapter->link_speed == 0)
4604 return;
4605 if (pci_channel_offline(pdev))
4606 return;
4607
3f9c0164
AD
4608 bytes = 0;
4609 packets = 0;
4610 for (i = 0; i < adapter->num_rx_queues; i++) {
ae1c07a6 4611 u32 rqdpc = rd32(E1000_RQDPC(i));
3025a446 4612 struct igb_ring *ring = adapter->rx_ring[i];
12dcd86b 4613
ae1c07a6
AD
4614 if (rqdpc) {
4615 ring->rx_stats.drops += rqdpc;
4616 net_stats->rx_fifo_errors += rqdpc;
4617 }
12dcd86b
ED
4618
4619 do {
4620 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4621 _bytes = ring->rx_stats.bytes;
4622 _packets = ring->rx_stats.packets;
4623 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4624 bytes += _bytes;
4625 packets += _packets;
3f9c0164
AD
4626 }
4627
128e45eb
AD
4628 net_stats->rx_bytes = bytes;
4629 net_stats->rx_packets = packets;
3f9c0164
AD
4630
4631 bytes = 0;
4632 packets = 0;
4633 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 4634 struct igb_ring *ring = adapter->tx_ring[i];
12dcd86b
ED
4635 do {
4636 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4637 _bytes = ring->tx_stats.bytes;
4638 _packets = ring->tx_stats.packets;
4639 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4640 bytes += _bytes;
4641 packets += _packets;
3f9c0164 4642 }
128e45eb
AD
4643 net_stats->tx_bytes = bytes;
4644 net_stats->tx_packets = packets;
3f9c0164
AD
4645
4646 /* read stats registers */
9d5c8243
AK
4647 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4648 adapter->stats.gprc += rd32(E1000_GPRC);
4649 adapter->stats.gorc += rd32(E1000_GORCL);
4650 rd32(E1000_GORCH); /* clear GORCL */
4651 adapter->stats.bprc += rd32(E1000_BPRC);
4652 adapter->stats.mprc += rd32(E1000_MPRC);
4653 adapter->stats.roc += rd32(E1000_ROC);
4654
4655 adapter->stats.prc64 += rd32(E1000_PRC64);
4656 adapter->stats.prc127 += rd32(E1000_PRC127);
4657 adapter->stats.prc255 += rd32(E1000_PRC255);
4658 adapter->stats.prc511 += rd32(E1000_PRC511);
4659 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4660 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4661 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4662 adapter->stats.sec += rd32(E1000_SEC);
4663
fa3d9a6d
MW
4664 mpc = rd32(E1000_MPC);
4665 adapter->stats.mpc += mpc;
4666 net_stats->rx_fifo_errors += mpc;
9d5c8243
AK
4667 adapter->stats.scc += rd32(E1000_SCC);
4668 adapter->stats.ecol += rd32(E1000_ECOL);
4669 adapter->stats.mcc += rd32(E1000_MCC);
4670 adapter->stats.latecol += rd32(E1000_LATECOL);
4671 adapter->stats.dc += rd32(E1000_DC);
4672 adapter->stats.rlec += rd32(E1000_RLEC);
4673 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4674 adapter->stats.xontxc += rd32(E1000_XONTXC);
4675 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4676 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4677 adapter->stats.fcruc += rd32(E1000_FCRUC);
4678 adapter->stats.gptc += rd32(E1000_GPTC);
4679 adapter->stats.gotc += rd32(E1000_GOTCL);
4680 rd32(E1000_GOTCH); /* clear GOTCL */
fa3d9a6d 4681 adapter->stats.rnbc += rd32(E1000_RNBC);
9d5c8243
AK
4682 adapter->stats.ruc += rd32(E1000_RUC);
4683 adapter->stats.rfc += rd32(E1000_RFC);
4684 adapter->stats.rjc += rd32(E1000_RJC);
4685 adapter->stats.tor += rd32(E1000_TORH);
4686 adapter->stats.tot += rd32(E1000_TOTH);
4687 adapter->stats.tpr += rd32(E1000_TPR);
4688
4689 adapter->stats.ptc64 += rd32(E1000_PTC64);
4690 adapter->stats.ptc127 += rd32(E1000_PTC127);
4691 adapter->stats.ptc255 += rd32(E1000_PTC255);
4692 adapter->stats.ptc511 += rd32(E1000_PTC511);
4693 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4694 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4695
4696 adapter->stats.mptc += rd32(E1000_MPTC);
4697 adapter->stats.bptc += rd32(E1000_BPTC);
4698
2d0b0f69
NN
4699 adapter->stats.tpt += rd32(E1000_TPT);
4700 adapter->stats.colc += rd32(E1000_COLC);
9d5c8243
AK
4701
4702 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
43915c7c
NN
4703 /* read internal phy specific stats */
4704 reg = rd32(E1000_CTRL_EXT);
4705 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4706 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3dbdf969
CW
4707
4708 /* this stat has invalid values on i210/i211 */
4709 if ((hw->mac.type != e1000_i210) &&
4710 (hw->mac.type != e1000_i211))
4711 adapter->stats.tncrs += rd32(E1000_TNCRS);
43915c7c
NN
4712 }
4713
9d5c8243
AK
4714 adapter->stats.tsctc += rd32(E1000_TSCTC);
4715 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4716
4717 adapter->stats.iac += rd32(E1000_IAC);
4718 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4719 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4720 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4721 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4722 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4723 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4724 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4725 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4726
4727 /* Fill out the OS statistics structure */
128e45eb
AD
4728 net_stats->multicast = adapter->stats.mprc;
4729 net_stats->collisions = adapter->stats.colc;
9d5c8243
AK
4730
4731 /* Rx Errors */
4732
4733 /* RLEC on some newer hardware can be incorrect so build
8c0ab70a 4734 * our own version based on RUC and ROC */
128e45eb 4735 net_stats->rx_errors = adapter->stats.rxerrc +
9d5c8243
AK
4736 adapter->stats.crcerrs + adapter->stats.algnerrc +
4737 adapter->stats.ruc + adapter->stats.roc +
4738 adapter->stats.cexterr;
128e45eb
AD
4739 net_stats->rx_length_errors = adapter->stats.ruc +
4740 adapter->stats.roc;
4741 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4742 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4743 net_stats->rx_missed_errors = adapter->stats.mpc;
9d5c8243
AK
4744
4745 /* Tx Errors */
128e45eb
AD
4746 net_stats->tx_errors = adapter->stats.ecol +
4747 adapter->stats.latecol;
4748 net_stats->tx_aborted_errors = adapter->stats.ecol;
4749 net_stats->tx_window_errors = adapter->stats.latecol;
4750 net_stats->tx_carrier_errors = adapter->stats.tncrs;
9d5c8243
AK
4751
4752 /* Tx Dropped needs to be maintained elsewhere */
4753
4754 /* Phy Stats */
4755 if (hw->phy.media_type == e1000_media_type_copper) {
4756 if ((adapter->link_speed == SPEED_1000) &&
73cd78f1 4757 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
9d5c8243
AK
4758 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4759 adapter->phy_stats.idle_errors += phy_tmp;
4760 }
4761 }
4762
4763 /* Management Stats */
4764 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4765 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4766 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
0a915b95
CW
4767
4768 /* OS2BMC Stats */
4769 reg = rd32(E1000_MANC);
4770 if (reg & E1000_MANC_EN_BMC2OS) {
4771 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4772 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4773 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4774 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4775 }
9d5c8243
AK
4776}
4777
9d5c8243
AK
4778static irqreturn_t igb_msix_other(int irq, void *data)
4779{
047e0030 4780 struct igb_adapter *adapter = data;
9d5c8243 4781 struct e1000_hw *hw = &adapter->hw;
844290e5 4782 u32 icr = rd32(E1000_ICR);
844290e5 4783 /* reading ICR causes bit 31 of EICR to be cleared */
dda0e083 4784
7f081d40
AD
4785 if (icr & E1000_ICR_DRSTA)
4786 schedule_work(&adapter->reset_task);
4787
047e0030 4788 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
4789 /* HW is reporting DMA is out of sync */
4790 adapter->stats.doosync++;
13800469
GR
4791 /* The DMA Out of Sync is also indication of a spoof event
4792 * in IOV mode. Check the Wrong VM Behavior register to
4793 * see if it is really a spoof event. */
4794 igb_check_wvbr(adapter);
dda0e083 4795 }
eebbbdba 4796
4ae196df
AD
4797 /* Check for a mailbox event */
4798 if (icr & E1000_ICR_VMMB)
4799 igb_msg_task(adapter);
4800
4801 if (icr & E1000_ICR_LSC) {
4802 hw->mac.get_link_status = 1;
4803 /* guard against interrupt when we're going down */
4804 if (!test_bit(__IGB_DOWN, &adapter->state))
4805 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4806 }
4807
1f6e8178
MV
4808#ifdef CONFIG_IGB_PTP
4809 if (icr & E1000_ICR_TS) {
4810 u32 tsicr = rd32(E1000_TSICR);
4811
4812 if (tsicr & E1000_TSICR_TXTS) {
4813 /* acknowledge the interrupt */
4814 wr32(E1000_TSICR, E1000_TSICR_TXTS);
4815 /* retrieve hardware timestamp */
4816 schedule_work(&adapter->ptp_tx_work);
4817 }
4818 }
4819#endif /* CONFIG_IGB_PTP */
4820
844290e5 4821 wr32(E1000_EIMS, adapter->eims_other);
9d5c8243
AK
4822
4823 return IRQ_HANDLED;
4824}
4825
047e0030 4826static void igb_write_itr(struct igb_q_vector *q_vector)
9d5c8243 4827{
26b39276 4828 struct igb_adapter *adapter = q_vector->adapter;
047e0030 4829 u32 itr_val = q_vector->itr_val & 0x7FFC;
9d5c8243 4830
047e0030
AD
4831 if (!q_vector->set_itr)
4832 return;
73cd78f1 4833
047e0030
AD
4834 if (!itr_val)
4835 itr_val = 0x4;
661086df 4836
26b39276
AD
4837 if (adapter->hw.mac.type == e1000_82575)
4838 itr_val |= itr_val << 16;
661086df 4839 else
0ba82994 4840 itr_val |= E1000_EITR_CNT_IGNR;
661086df 4841
047e0030
AD
4842 writel(itr_val, q_vector->itr_register);
4843 q_vector->set_itr = 0;
6eb5a7f1
AD
4844}
4845
047e0030 4846static irqreturn_t igb_msix_ring(int irq, void *data)
9d5c8243 4847{
047e0030 4848 struct igb_q_vector *q_vector = data;
9d5c8243 4849
047e0030
AD
4850 /* Write the ITR value calculated from the previous interrupt. */
4851 igb_write_itr(q_vector);
9d5c8243 4852
047e0030 4853 napi_schedule(&q_vector->napi);
844290e5 4854
047e0030 4855 return IRQ_HANDLED;
fe4506b6
JC
4856}
4857
421e02f0 4858#ifdef CONFIG_IGB_DCA
047e0030 4859static void igb_update_dca(struct igb_q_vector *q_vector)
fe4506b6 4860{
047e0030 4861 struct igb_adapter *adapter = q_vector->adapter;
fe4506b6
JC
4862 struct e1000_hw *hw = &adapter->hw;
4863 int cpu = get_cpu();
fe4506b6 4864
047e0030
AD
4865 if (q_vector->cpu == cpu)
4866 goto out_no_update;
4867
0ba82994
AD
4868 if (q_vector->tx.ring) {
4869 int q = q_vector->tx.ring->reg_idx;
047e0030
AD
4870 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4871 if (hw->mac.type == e1000_82575) {
4872 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4873 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
2d064c06 4874 } else {
047e0030
AD
4875 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4876 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4877 E1000_DCA_TXCTRL_CPUID_SHIFT;
4878 }
4879 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4880 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4881 }
0ba82994
AD
4882 if (q_vector->rx.ring) {
4883 int q = q_vector->rx.ring->reg_idx;
047e0030
AD
4884 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4885 if (hw->mac.type == e1000_82575) {
2d064c06 4886 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
92be7917 4887 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
047e0030
AD
4888 } else {
4889 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4890 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4891 E1000_DCA_RXCTRL_CPUID_SHIFT;
2d064c06 4892 }
fe4506b6
JC
4893 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4894 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4895 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4896 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
fe4506b6 4897 }
047e0030
AD
4898 q_vector->cpu = cpu;
4899out_no_update:
fe4506b6
JC
4900 put_cpu();
4901}
4902
4903static void igb_setup_dca(struct igb_adapter *adapter)
4904{
7e0e99ef 4905 struct e1000_hw *hw = &adapter->hw;
fe4506b6
JC
4906 int i;
4907
7dfc16fa 4908 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
fe4506b6
JC
4909 return;
4910
7e0e99ef
AD
4911 /* Always use CB2 mode, difference is masked in the CB driver. */
4912 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4913
047e0030 4914 for (i = 0; i < adapter->num_q_vectors; i++) {
26b39276
AD
4915 adapter->q_vector[i]->cpu = -1;
4916 igb_update_dca(adapter->q_vector[i]);
fe4506b6
JC
4917 }
4918}
4919
4920static int __igb_notify_dca(struct device *dev, void *data)
4921{
4922 struct net_device *netdev = dev_get_drvdata(dev);
4923 struct igb_adapter *adapter = netdev_priv(netdev);
090b1795 4924 struct pci_dev *pdev = adapter->pdev;
fe4506b6
JC
4925 struct e1000_hw *hw = &adapter->hw;
4926 unsigned long event = *(unsigned long *)data;
4927
4928 switch (event) {
4929 case DCA_PROVIDER_ADD:
4930 /* if already enabled, don't do it again */
7dfc16fa 4931 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
fe4506b6 4932 break;
fe4506b6 4933 if (dca_add_requester(dev) == 0) {
bbd98fe4 4934 adapter->flags |= IGB_FLAG_DCA_ENABLED;
090b1795 4935 dev_info(&pdev->dev, "DCA enabled\n");
fe4506b6
JC
4936 igb_setup_dca(adapter);
4937 break;
4938 }
4939 /* Fall Through since DCA is disabled. */
4940 case DCA_PROVIDER_REMOVE:
7dfc16fa 4941 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
fe4506b6 4942 /* without this a class_device is left
047e0030 4943 * hanging around in the sysfs model */
fe4506b6 4944 dca_remove_requester(dev);
090b1795 4945 dev_info(&pdev->dev, "DCA disabled\n");
7dfc16fa 4946 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
cbd347ad 4947 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
fe4506b6
JC
4948 }
4949 break;
4950 }
bbd98fe4 4951
fe4506b6 4952 return 0;
9d5c8243
AK
4953}
4954
fe4506b6
JC
4955static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4956 void *p)
4957{
4958 int ret_val;
4959
4960 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4961 __igb_notify_dca);
4962
4963 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4964}
421e02f0 4965#endif /* CONFIG_IGB_DCA */
9d5c8243 4966
0224d663
GR
4967#ifdef CONFIG_PCI_IOV
4968static int igb_vf_configure(struct igb_adapter *adapter, int vf)
4969{
4970 unsigned char mac_addr[ETH_ALEN];
0224d663 4971
7efd26d0 4972 eth_random_addr(mac_addr);
0224d663
GR
4973 igb_set_vf_mac(adapter, vf, mac_addr);
4974
f557147c 4975 return 0;
0224d663
GR
4976}
4977
f557147c 4978static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
0224d663 4979{
0224d663 4980 struct pci_dev *pdev = adapter->pdev;
f557147c
SA
4981 struct pci_dev *vfdev;
4982 int dev_id;
0224d663
GR
4983
4984 switch (adapter->hw.mac.type) {
4985 case e1000_82576:
f557147c 4986 dev_id = IGB_82576_VF_DEV_ID;
0224d663
GR
4987 break;
4988 case e1000_i350:
f557147c 4989 dev_id = IGB_I350_VF_DEV_ID;
0224d663
GR
4990 break;
4991 default:
f557147c 4992 return false;
0224d663
GR
4993 }
4994
f557147c
SA
4995 /* loop through all the VFs to see if we own any that are assigned */
4996 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
4997 while (vfdev) {
4998 /* if we don't own it we don't care */
4999 if (vfdev->is_virtfn && vfdev->physfn == pdev) {
5000 /* if it is assigned we cannot release it */
5001 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
0224d663
GR
5002 return true;
5003 }
f557147c
SA
5004
5005 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
0224d663 5006 }
f557147c 5007
0224d663
GR
5008 return false;
5009}
5010
5011#endif
4ae196df
AD
5012static void igb_ping_all_vfs(struct igb_adapter *adapter)
5013{
5014 struct e1000_hw *hw = &adapter->hw;
5015 u32 ping;
5016 int i;
5017
5018 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
5019 ping = E1000_PF_CONTROL_MSG;
f2ca0dbe 5020 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
4ae196df
AD
5021 ping |= E1000_VT_MSGTYPE_CTS;
5022 igb_write_mbx(hw, &ping, 1, i);
5023 }
5024}
5025
7d5753f0
AD
5026static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5027{
5028 struct e1000_hw *hw = &adapter->hw;
5029 u32 vmolr = rd32(E1000_VMOLR(vf));
5030 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5031
d85b9004 5032 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
7d5753f0
AD
5033 IGB_VF_FLAG_MULTI_PROMISC);
5034 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5035
5036 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5037 vmolr |= E1000_VMOLR_MPME;
d85b9004 5038 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
7d5753f0
AD
5039 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5040 } else {
5041 /*
5042 * if we have hashes and we are clearing a multicast promisc
5043 * flag we need to write the hashes to the MTA as this step
5044 * was previously skipped
5045 */
5046 if (vf_data->num_vf_mc_hashes > 30) {
5047 vmolr |= E1000_VMOLR_MPME;
5048 } else if (vf_data->num_vf_mc_hashes) {
5049 int j;
5050 vmolr |= E1000_VMOLR_ROMPE;
5051 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5052 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5053 }
5054 }
5055
5056 wr32(E1000_VMOLR(vf), vmolr);
5057
5058 /* there are flags left unprocessed, likely not supported */
5059 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5060 return -EINVAL;
5061
5062 return 0;
5063
5064}
5065
4ae196df
AD
5066static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5067 u32 *msgbuf, u32 vf)
5068{
5069 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5070 u16 *hash_list = (u16 *)&msgbuf[1];
5071 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5072 int i;
5073
7d5753f0 5074 /* salt away the number of multicast addresses assigned
4ae196df
AD
 5075 * to this VF for later use to restore when the PF multicast
5076 * list changes
5077 */
5078 vf_data->num_vf_mc_hashes = n;
5079
7d5753f0
AD
5080 /* only up to 30 hash values supported */
5081 if (n > 30)
5082 n = 30;
5083
5084 /* store the hashes for later use */
4ae196df 5085 for (i = 0; i < n; i++)
a419aef8 5086 vf_data->vf_mc_hashes[i] = hash_list[i];
4ae196df
AD
5087
5088 /* Flush and reset the mta with the new values */
ff41f8dc 5089 igb_set_rx_mode(adapter->netdev);
4ae196df
AD
5090
5091 return 0;
5092}
5093
5094static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5095{
5096 struct e1000_hw *hw = &adapter->hw;
5097 struct vf_data_storage *vf_data;
5098 int i, j;
5099
5100 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7d5753f0
AD
5101 u32 vmolr = rd32(E1000_VMOLR(i));
5102 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5103
4ae196df 5104 vf_data = &adapter->vf_data[i];
7d5753f0
AD
5105
5106 if ((vf_data->num_vf_mc_hashes > 30) ||
5107 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5108 vmolr |= E1000_VMOLR_MPME;
5109 } else if (vf_data->num_vf_mc_hashes) {
5110 vmolr |= E1000_VMOLR_ROMPE;
5111 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5112 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5113 }
5114 wr32(E1000_VMOLR(i), vmolr);
4ae196df
AD
5115 }
5116}
5117
5118static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5119{
5120 struct e1000_hw *hw = &adapter->hw;
5121 u32 pool_mask, reg, vid;
5122 int i;
5123
5124 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5125
 5126 /* Remove this VF from the pool of every vlan filter */
5127 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5128 reg = rd32(E1000_VLVF(i));
5129
5130 /* remove the vf from the pool */
5131 reg &= ~pool_mask;
5132
5133 /* if pool is empty then remove entry from vfta */
5134 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5135 (reg & E1000_VLVF_VLANID_ENABLE)) {
 5136 vid = reg & E1000_VLVF_VLANID_MASK;
 5137 reg = 0;
5138 igb_vfta_set(hw, vid, false);
5139 }
5140
5141 wr32(E1000_VLVF(i), reg);
5142 }
ae641bdc
AD
5143
5144 adapter->vf_data[vf].vlans_enabled = 0;
4ae196df
AD
5145}
5146
5147static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5148{
5149 struct e1000_hw *hw = &adapter->hw;
5150 u32 reg, i;
5151
51466239
AD
5152 /* The vlvf table only exists on 82576 hardware and newer */
5153 if (hw->mac.type < e1000_82576)
5154 return -1;
5155
5156 /* we only need to do this if VMDq is enabled */
4ae196df
AD
5157 if (!adapter->vfs_allocated_count)
5158 return -1;
5159
5160 /* Find the vlan filter for this id */
5161 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5162 reg = rd32(E1000_VLVF(i));
5163 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5164 vid == (reg & E1000_VLVF_VLANID_MASK))
5165 break;
5166 }
5167
5168 if (add) {
5169 if (i == E1000_VLVF_ARRAY_SIZE) {
5170 /* Did not find a matching VLAN ID entry that was
5171 * enabled. Search for a free filter entry, i.e.
5172 * one without the enable bit set
5173 */
5174 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5175 reg = rd32(E1000_VLVF(i));
5176 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5177 break;
5178 }
5179 }
5180 if (i < E1000_VLVF_ARRAY_SIZE) {
5181 /* Found an enabled/available entry */
5182 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5183
5184 /* if !enabled we need to set this up in vfta */
5185 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
51466239
AD
5186 /* add VID to filter table */
5187 igb_vfta_set(hw, vid, true);
4ae196df
AD
5188 reg |= E1000_VLVF_VLANID_ENABLE;
5189 }
cad6d05f
AD
5190 reg &= ~E1000_VLVF_VLANID_MASK;
5191 reg |= vid;
4ae196df 5192 wr32(E1000_VLVF(i), reg);
ae641bdc
AD
5193
5194 /* do not modify RLPML for PF devices */
5195 if (vf >= adapter->vfs_allocated_count)
5196 return 0;
5197
5198 if (!adapter->vf_data[vf].vlans_enabled) {
5199 u32 size;
5200 reg = rd32(E1000_VMOLR(vf));
5201 size = reg & E1000_VMOLR_RLPML_MASK;
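/* grow the VF's max frame size by the 4 byte 802.1Q VLAN tag */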
5202 size += 4;
5203 reg &= ~E1000_VMOLR_RLPML_MASK;
5204 reg |= size;
5205 wr32(E1000_VMOLR(vf), reg);
5206 }
ae641bdc 5207
51466239 5208 adapter->vf_data[vf].vlans_enabled++;
4ae196df
AD
5209 }
5210 } else {
5211 if (i < E1000_VLVF_ARRAY_SIZE) {
5212 /* remove vf from the pool */
5213 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5214 /* if pool is empty then remove entry from vfta */
5215 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5216 reg = 0;
5217 igb_vfta_set(hw, vid, false);
5218 }
5219 wr32(E1000_VLVF(i), reg);
ae641bdc
AD
5220
5221 /* do not modify RLPML for PF devices */
5222 if (vf >= adapter->vfs_allocated_count)
5223 return 0;
5224
5225 adapter->vf_data[vf].vlans_enabled--;
5226 if (!adapter->vf_data[vf].vlans_enabled) {
5227 u32 size;
5228 reg = rd32(E1000_VMOLR(vf));
5229 size = reg & E1000_VMOLR_RLPML_MASK;
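/* last VLAN removed: shrink the max frame size back by the 4 byte VLAN tag */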
5230 size -= 4;
5231 reg &= ~E1000_VMOLR_RLPML_MASK;
5232 reg |= size;
5233 wr32(E1000_VMOLR(vf), reg);
5234 }
4ae196df
AD
5235 }
5236 }
8151d294
WM
5237 return 0;
5238}
5239
5240static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5241{
5242 struct e1000_hw *hw = &adapter->hw;
5243
5244 if (vid)
5245 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5246 else
5247 wr32(E1000_VMVIR(vf), 0);
5248}
5249
5250static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5251 int vf, u16 vlan, u8 qos)
5252{
5253 int err = 0;
5254 struct igb_adapter *adapter = netdev_priv(netdev);
5255
5256 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5257 return -EINVAL;
5258 if (vlan || qos) {
5259 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5260 if (err)
5261 goto out;
5262 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5263 igb_set_vmolr(adapter, vf, !vlan);
5264 adapter->vf_data[vf].pf_vlan = vlan;
5265 adapter->vf_data[vf].pf_qos = qos;
5266 dev_info(&adapter->pdev->dev,
5267 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5268 if (test_bit(__IGB_DOWN, &adapter->state)) {
5269 dev_warn(&adapter->pdev->dev,
5270 "The VF VLAN has been set,"
5271 " but the PF device is not up.\n");
5272 dev_warn(&adapter->pdev->dev,
5273 "Bring the PF device up before"
5274 " attempting to use the VF device.\n");
5275 }
5276 } else {
5277 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5278 false, vf);
5279 igb_set_vmvir(adapter, vlan, vf);
5280 igb_set_vmolr(adapter, vf, true);
5281 adapter->vf_data[vf].pf_vlan = 0;
5282 adapter->vf_data[vf].pf_qos = 0;
5283 }
5284out:
5285 return err;
4ae196df
AD
5286}
5287
5288static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5289{
5290 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5291 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5292
5293 return igb_vlvf_set(adapter, vid, add, vf);
5294}
5295
f2ca0dbe 5296static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4ae196df 5297{
8fa7e0f7
GR
5298 /* clear flags - except flag that indicates PF has set the MAC */
5299 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
f2ca0dbe 5300 adapter->vf_data[vf].last_nack = jiffies;
4ae196df
AD
5301
5302 /* reset offloads to defaults */
8151d294 5303 igb_set_vmolr(adapter, vf, true);
4ae196df
AD
5304
5305 /* reset vlans for device */
5306 igb_clear_vf_vfta(adapter, vf);
8151d294
WM
5307 if (adapter->vf_data[vf].pf_vlan)
5308 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5309 adapter->vf_data[vf].pf_vlan,
5310 adapter->vf_data[vf].pf_qos);
5311 else
5312 igb_clear_vf_vfta(adapter, vf);
4ae196df
AD
5313
5314 /* reset multicast table array for vf */
5315 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5316
5317 /* Flush and reset the mta with the new values */
ff41f8dc 5318 igb_set_rx_mode(adapter->netdev);
4ae196df
AD
5319}
5320
f2ca0dbe
AD
5321static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5322{
5323 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5324
5325 /* generate a new mac address as we were hotplug removed/added */
8151d294 5326 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7efd26d0 5327 eth_random_addr(vf_mac);
f2ca0dbe
AD
5328
5329 /* process remaining reset events */
5330 igb_vf_reset(adapter, vf);
5331}
5332
5333static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4ae196df
AD
5334{
5335 struct e1000_hw *hw = &adapter->hw;
5336 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
ff41f8dc 5337 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
4ae196df
AD
5338 u32 reg, msgbuf[3];
5339 u8 *addr = (u8 *)(&msgbuf[1]);
5340
5341 /* process all the same items cleared in a function level reset */
f2ca0dbe 5342 igb_vf_reset(adapter, vf);
4ae196df
AD
5343
5344 /* set vf mac address */
26ad9178 5345 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4ae196df
AD
5346
5347 /* enable transmit and receive for vf */
5348 reg = rd32(E1000_VFTE);
5349 wr32(E1000_VFTE, reg | (1 << vf));
5350 reg = rd32(E1000_VFRE);
5351 wr32(E1000_VFRE, reg | (1 << vf));
5352
8fa7e0f7 5353 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
4ae196df
AD
5354
5355 /* reply to reset with ack and vf mac address */
5356 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5357 memcpy(addr, vf_mac, 6);
5358 igb_write_mbx(hw, msgbuf, 3, vf);
5359}
5360
5361static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5362{
de42edde
GR
5363 /*
5364 * The VF MAC Address is stored in a packed array of bytes
5365 * starting at the second 32 bit word of the msg array
5366 */
f2ca0dbe
AD
5367 unsigned char *addr = (char *)&msg[1];
5368 int err = -1;
4ae196df 5369
f2ca0dbe
AD
5370 if (is_valid_ether_addr(addr))
5371 err = igb_set_vf_mac(adapter, vf, addr);
4ae196df 5372
f2ca0dbe 5373 return err;
4ae196df
AD
5374}
5375
5376static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5377{
5378 struct e1000_hw *hw = &adapter->hw;
f2ca0dbe 5379 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4ae196df
AD
5380 u32 msg = E1000_VT_MSGTYPE_NACK;
5381
5382 /* if device isn't clear to send it shouldn't be reading either */
f2ca0dbe
AD
5383 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5384 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4ae196df 5385 igb_write_mbx(hw, &msg, 1, vf);
f2ca0dbe 5386 vf_data->last_nack = jiffies;
4ae196df
AD
5387 }
5388}
5389
f2ca0dbe 5390static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4ae196df 5391{
f2ca0dbe
AD
5392 struct pci_dev *pdev = adapter->pdev;
5393 u32 msgbuf[E1000_VFMAILBOX_SIZE];
4ae196df 5394 struct e1000_hw *hw = &adapter->hw;
f2ca0dbe 5395 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4ae196df
AD
5396 s32 retval;
5397
f2ca0dbe 5398 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4ae196df 5399
fef45f4c
AD
5400 if (retval) {
5401 /* if receive failed revoke VF CTS stats and restart init */
f2ca0dbe 5402 dev_err(&pdev->dev, "Error receiving message from VF\n");
fef45f4c
AD
5403 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5404 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5405 return;
5406 goto out;
5407 }
4ae196df
AD
5408
5409 /* this is a message we already processed, do nothing */
5410 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
f2ca0dbe 5411 return;
4ae196df
AD
5412
5413 /*
5414 * until the vf completes a reset it should not be
5415 * allowed to start any configuration.
5416 */
5417
5418 if (msgbuf[0] == E1000_VF_RESET) {
5419 igb_vf_reset_msg(adapter, vf);
f2ca0dbe 5420 return;
4ae196df
AD
5421 }
5422
f2ca0dbe 5423 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
fef45f4c
AD
5424 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5425 return;
5426 retval = -1;
5427 goto out;
4ae196df
AD
5428 }
5429
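/* the low 16 bits of word 0 select the mailbox command; any extra
 * info travels in the E1000_VT_MSGINFO bits above them */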
5430 switch ((msgbuf[0] & 0xFFFF)) {
5431 case E1000_VF_SET_MAC_ADDR:
a6b5ea35
GR
5432 retval = -EINVAL;
5433 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5434 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5435 else
5436 dev_warn(&pdev->dev,
5437 "VF %d attempted to override administratively "
5438 "set MAC address\nReload the VF driver to "
5439 "resume operations\n", vf);
4ae196df 5440 break;
7d5753f0
AD
5441 case E1000_VF_SET_PROMISC:
5442 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5443 break;
4ae196df
AD
5444 case E1000_VF_SET_MULTICAST:
5445 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5446 break;
5447 case E1000_VF_SET_LPE:
5448 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5449 break;
5450 case E1000_VF_SET_VLAN:
a6b5ea35
GR
5451 retval = -1;
5452 if (vf_data->pf_vlan)
5453 dev_warn(&pdev->dev,
5454 "VF %d attempted to override administratively "
5455 "set VLAN tag\nReload the VF driver to "
5456 "resume operations\n", vf);
8151d294
WM
5457 else
5458 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4ae196df
AD
5459 break;
5460 default:
090b1795 5461 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4ae196df
AD
5462 retval = -1;
5463 break;
5464 }
5465
fef45f4c
AD
5466 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5467out:
4ae196df
AD
5468 /* notify the VF of the results of what it sent us */
5469 if (retval)
5470 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5471 else
5472 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5473
4ae196df 5474 igb_write_mbx(hw, msgbuf, 1, vf);
f2ca0dbe 5475}
4ae196df 5476
f2ca0dbe
AD
5477static void igb_msg_task(struct igb_adapter *adapter)
5478{
5479 struct e1000_hw *hw = &adapter->hw;
5480 u32 vf;
5481
5482 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5483 /* process any reset requests */
5484 if (!igb_check_for_rst(hw, vf))
5485 igb_vf_reset_event(adapter, vf);
5486
5487 /* process any messages pending */
5488 if (!igb_check_for_msg(hw, vf))
5489 igb_rcv_msg_from_vf(adapter, vf);
5490
5491 /* process any acks */
5492 if (!igb_check_for_ack(hw, vf))
5493 igb_rcv_ack_from_vf(adapter, vf);
5494 }
4ae196df
AD
5495}
5496
68d480c4
AD
5497/**
5498 * igb_set_uta - Set unicast filter table address
5499 * @adapter: board private structure
5500 *
5501 * The unicast table address is a register array of 32-bit registers.
5502 * The table is meant to be used in a way similar to how the MTA is used
5503 * however due to certain limitations in the hardware it is necessary to
25985edc
LDM
5504 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5505 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
68d480c4
AD
5506 **/
5507static void igb_set_uta(struct igb_adapter *adapter)
5508{
5509 struct e1000_hw *hw = &adapter->hw;
5510 int i;
5511
5512 /* The UTA table only exists on 82576 hardware and newer */
5513 if (hw->mac.type < e1000_82576)
5514 return;
5515
5516 /* we only need to do this if VMDq is enabled */
5517 if (!adapter->vfs_allocated_count)
5518 return;
5519
5520 for (i = 0; i < hw->mac.uta_reg_count; i++)
5521 array_wr32(E1000_UTA, i, ~0);
5522}
5523
9d5c8243
AK
5524/**
5525 * igb_intr_msi - Interrupt Handler
5526 * @irq: interrupt number
5527 * @data: pointer to a network interface device structure
5528 **/
5529static irqreturn_t igb_intr_msi(int irq, void *data)
5530{
047e0030
AD
5531 struct igb_adapter *adapter = data;
5532 struct igb_q_vector *q_vector = adapter->q_vector[0];
9d5c8243
AK
5533 struct e1000_hw *hw = &adapter->hw;
5534 /* read ICR disables interrupts using IAM */
5535 u32 icr = rd32(E1000_ICR);
5536
047e0030 5537 igb_write_itr(q_vector);
9d5c8243 5538
7f081d40
AD
5539 if (icr & E1000_ICR_DRSTA)
5540 schedule_work(&adapter->reset_task);
5541
047e0030 5542 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
5543 /* HW is reporting DMA is out of sync */
5544 adapter->stats.doosync++;
5545 }
5546
9d5c8243
AK
5547 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5548 hw->mac.get_link_status = 1;
5549 if (!test_bit(__IGB_DOWN, &adapter->state))
5550 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5551 }
5552
1f6e8178
MV
5553#ifdef CONFIG_IGB_PTP
5554 if (icr & E1000_ICR_TS) {
5555 u32 tsicr = rd32(E1000_TSICR);
5556
5557 if (tsicr & E1000_TSICR_TXTS) {
5558 /* acknowledge the interrupt */
5559 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5560 /* retrieve hardware timestamp */
5561 schedule_work(&adapter->ptp_tx_work);
5562 }
5563 }
5564#endif /* CONFIG_IGB_PTP */
5565
047e0030 5566 napi_schedule(&q_vector->napi);
9d5c8243
AK
5567
5568 return IRQ_HANDLED;
5569}
5570
5571/**
4a3c6433 5572 * igb_intr - Legacy Interrupt Handler
9d5c8243
AK
5573 * @irq: interrupt number
5574 * @data: pointer to a network interface device structure
5575 **/
5576static irqreturn_t igb_intr(int irq, void *data)
5577{
047e0030
AD
5578 struct igb_adapter *adapter = data;
5579 struct igb_q_vector *q_vector = adapter->q_vector[0];
9d5c8243
AK
5580 struct e1000_hw *hw = &adapter->hw;
5581 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5582 * need for the IMC write */
5583 u32 icr = rd32(E1000_ICR);
9d5c8243
AK
5584
5585 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5586 * not set, then the adapter didn't send an interrupt */
5587 if (!(icr & E1000_ICR_INT_ASSERTED))
5588 return IRQ_NONE;
5589
0ba82994
AD
5590 igb_write_itr(q_vector);
5591
7f081d40
AD
5592 if (icr & E1000_ICR_DRSTA)
5593 schedule_work(&adapter->reset_task);
5594
047e0030 5595 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
5596 /* HW is reporting DMA is out of sync */
5597 adapter->stats.doosync++;
5598 }
5599
9d5c8243
AK
5600 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5601 hw->mac.get_link_status = 1;
5602 /* guard against interrupt when we're going down */
5603 if (!test_bit(__IGB_DOWN, &adapter->state))
5604 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5605 }
5606
1f6e8178
MV
5607#ifdef CONFIG_IGB_PTP
5608 if (icr & E1000_ICR_TS) {
5609 u32 tsicr = rd32(E1000_TSICR);
5610
5611 if (tsicr & E1000_TSICR_TXTS) {
5612 /* acknowledge the interrupt */
5613 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5614 /* retrieve hardware timestamp */
5615 schedule_work(&adapter->ptp_tx_work);
5616 }
5617 }
5618#endif /* CONFIG_IGB_PTP */
5619
047e0030 5620 napi_schedule(&q_vector->napi);
9d5c8243
AK
5621
5622 return IRQ_HANDLED;
5623}
5624
c50b52a0 5625static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
9d5c8243 5626{
047e0030 5627 struct igb_adapter *adapter = q_vector->adapter;
46544258 5628 struct e1000_hw *hw = &adapter->hw;
9d5c8243 5629
0ba82994
AD
5630 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5631 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5632 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5633 igb_set_itr(q_vector);
46544258 5634 else
047e0030 5635 igb_update_ring_itr(q_vector);
9d5c8243
AK
5636 }
5637
46544258
AD
5638 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5639 if (adapter->msix_entries)
047e0030 5640 wr32(E1000_EIMS, q_vector->eims_value);
46544258
AD
5641 else
5642 igb_irq_enable(adapter);
5643 }
9d5c8243
AK
5644}
5645
46544258
AD
5646/**
5647 * igb_poll - NAPI Rx polling callback
5648 * @napi: napi polling structure
5649 * @budget: count of how many packets we should handle
5650 **/
5651static int igb_poll(struct napi_struct *napi, int budget)
9d5c8243 5652{
047e0030
AD
5653 struct igb_q_vector *q_vector = container_of(napi,
5654 struct igb_q_vector,
5655 napi);
16eb8815 5656 bool clean_complete = true;
9d5c8243 5657
421e02f0 5658#ifdef CONFIG_IGB_DCA
047e0030
AD
5659 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5660 igb_update_dca(q_vector);
fe4506b6 5661#endif
0ba82994 5662 if (q_vector->tx.ring)
13fde97a 5663 clean_complete = igb_clean_tx_irq(q_vector);
9d5c8243 5664
0ba82994 5665 if (q_vector->rx.ring)
cd392f5c 5666 clean_complete &= igb_clean_rx_irq(q_vector, budget);
047e0030 5667
16eb8815
AD
5668 /* If all work not completed, return budget and keep polling */
5669 if (!clean_complete)
5670 return budget;
46544258 5671
9d5c8243 5672 /* If not enough Rx work done, exit the polling mode */
16eb8815
AD
5673 napi_complete(napi);
5674 igb_ring_irq_enable(q_vector);
9d5c8243 5675
16eb8815 5676 return 0;
9d5c8243 5677}
6d8126f9 5678
9d5c8243
AK
5679/**
5680 * igb_clean_tx_irq - Reclaim resources after transmit completes
047e0030 5681 * @q_vector: pointer to q_vector containing needed info
49ce9c2c 5682 *
9d5c8243
AK
5683 * returns true if ring is completely cleaned
5684 **/
047e0030 5685static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
9d5c8243 5686{
047e0030 5687 struct igb_adapter *adapter = q_vector->adapter;
0ba82994 5688 struct igb_ring *tx_ring = q_vector->tx.ring;
06034649 5689 struct igb_tx_buffer *tx_buffer;
f4128785 5690 union e1000_adv_tx_desc *tx_desc;
9d5c8243 5691 unsigned int total_bytes = 0, total_packets = 0;
0ba82994 5692 unsigned int budget = q_vector->tx.work_limit;
8542db05 5693 unsigned int i = tx_ring->next_to_clean;
9d5c8243 5694
13fde97a
AD
5695 if (test_bit(__IGB_DOWN, &adapter->state))
5696 return true;
0e014cb1 5697
06034649 5698 tx_buffer = &tx_ring->tx_buffer_info[i];
13fde97a 5699 tx_desc = IGB_TX_DESC(tx_ring, i);
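/* bias i by -count so the ring wrap test below reduces to a simple !i check */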
8542db05 5700 i -= tx_ring->count;
9d5c8243 5701
f4128785
AD
5702 do {
5703 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
8542db05
AD
5704
5705 /* if next_to_watch is not set then there is no work pending */
5706 if (!eop_desc)
5707 break;
13fde97a 5708
f4128785
AD
5709 /* prevent any other reads prior to eop_desc */
5710 rmb();
5711
13fde97a
AD
5712 /* if DD is not set pending work has not been completed */
5713 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5714 break;
5715
8542db05
AD
5716 /* clear next_to_watch to prevent false hangs */
5717 tx_buffer->next_to_watch = NULL;
9d5c8243 5718
ebe42d16
AD
5719 /* update the statistics for this packet */
5720 total_bytes += tx_buffer->bytecount;
5721 total_packets += tx_buffer->gso_segs;
13fde97a 5722
ebe42d16
AD
5723 /* free the skb */
5724 dev_kfree_skb_any(tx_buffer->skb);
5725 tx_buffer->skb = NULL;
13fde97a 5726
ebe42d16
AD
5727 /* unmap skb header data */
5728 dma_unmap_single(tx_ring->dev,
5729 tx_buffer->dma,
5730 tx_buffer->length,
5731 DMA_TO_DEVICE);
5732
5733 /* clear last DMA location and unmap remaining buffers */
5734 while (tx_desc != eop_desc) {
5735 tx_buffer->dma = 0;
9d5c8243 5736
13fde97a
AD
5737 tx_buffer++;
5738 tx_desc++;
9d5c8243 5739 i++;
8542db05
AD
5740 if (unlikely(!i)) {
5741 i -= tx_ring->count;
06034649 5742 tx_buffer = tx_ring->tx_buffer_info;
13fde97a
AD
5743 tx_desc = IGB_TX_DESC(tx_ring, 0);
5744 }
ebe42d16
AD
5745
5746 /* unmap any remaining paged data */
5747 if (tx_buffer->dma) {
5748 dma_unmap_page(tx_ring->dev,
5749 tx_buffer->dma,
5750 tx_buffer->length,
5751 DMA_TO_DEVICE);
5752 }
5753 }
5754
5755 /* clear last DMA location */
5756 tx_buffer->dma = 0;
5757
5758 /* move us one more past the eop_desc for start of next pkt */
5759 tx_buffer++;
5760 tx_desc++;
5761 i++;
5762 if (unlikely(!i)) {
5763 i -= tx_ring->count;
5764 tx_buffer = tx_ring->tx_buffer_info;
5765 tx_desc = IGB_TX_DESC(tx_ring, 0);
5766 }
f4128785
AD
5767
5768 /* issue prefetch for next Tx descriptor */
5769 prefetch(tx_desc);
5770
5771 /* update budget accounting */
5772 budget--;
5773 } while (likely(budget));
0e014cb1 5774
bdbc0631
ED
5775 netdev_tx_completed_queue(txring_txq(tx_ring),
5776 total_packets, total_bytes);
8542db05 5777 i += tx_ring->count;
9d5c8243 5778 tx_ring->next_to_clean = i;
13fde97a
AD
5779 u64_stats_update_begin(&tx_ring->tx_syncp);
5780 tx_ring->tx_stats.bytes += total_bytes;
5781 tx_ring->tx_stats.packets += total_packets;
5782 u64_stats_update_end(&tx_ring->tx_syncp);
0ba82994
AD
5783 q_vector->tx.total_bytes += total_bytes;
5784 q_vector->tx.total_packets += total_packets;
9d5c8243 5785
6d095fa8 5786 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
13fde97a 5787 struct e1000_hw *hw = &adapter->hw;
12dcd86b 5788
9d5c8243
AK
 5789 /* Detect a transmit hang in hardware; this serializes the
5790 * check with the clearing of time_stamp and movement of i */
6d095fa8 5791 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
f4128785 5792 if (tx_buffer->next_to_watch &&
8542db05 5793 time_after(jiffies, tx_buffer->time_stamp +
8e95a202
JP
5794 (adapter->tx_timeout_factor * HZ)) &&
5795 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
9d5c8243 5796
9d5c8243 5797 /* detected Tx unit hang */
59d71989 5798 dev_err(tx_ring->dev,
9d5c8243 5799 "Detected Tx Unit Hang\n"
2d064c06 5800 " Tx Queue <%d>\n"
9d5c8243
AK
5801 " TDH <%x>\n"
5802 " TDT <%x>\n"
5803 " next_to_use <%x>\n"
5804 " next_to_clean <%x>\n"
9d5c8243
AK
5805 "buffer_info[next_to_clean]\n"
5806 " time_stamp <%lx>\n"
8542db05 5807 " next_to_watch <%p>\n"
9d5c8243
AK
5808 " jiffies <%lx>\n"
5809 " desc.status <%x>\n",
2d064c06 5810 tx_ring->queue_index,
238ac817 5811 rd32(E1000_TDH(tx_ring->reg_idx)),
fce99e34 5812 readl(tx_ring->tail),
9d5c8243
AK
5813 tx_ring->next_to_use,
5814 tx_ring->next_to_clean,
8542db05 5815 tx_buffer->time_stamp,
f4128785 5816 tx_buffer->next_to_watch,
9d5c8243 5817 jiffies,
f4128785 5818 tx_buffer->next_to_watch->wb.status);
13fde97a
AD
5819 netif_stop_subqueue(tx_ring->netdev,
5820 tx_ring->queue_index);
5821
5822 /* we are about to reset, no point in enabling stuff */
5823 return true;
9d5c8243
AK
5824 }
5825 }
13fde97a
AD
5826
5827 if (unlikely(total_packets &&
5828 netif_carrier_ok(tx_ring->netdev) &&
5829 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5830 /* Make sure that anybody stopping the queue after this
5831 * sees the new next_to_clean.
5832 */
5833 smp_mb();
5834 if (__netif_subqueue_stopped(tx_ring->netdev,
5835 tx_ring->queue_index) &&
5836 !(test_bit(__IGB_DOWN, &adapter->state))) {
5837 netif_wake_subqueue(tx_ring->netdev,
5838 tx_ring->queue_index);
5839
5840 u64_stats_update_begin(&tx_ring->tx_syncp);
5841 tx_ring->tx_stats.restart_queue++;
5842 u64_stats_update_end(&tx_ring->tx_syncp);
5843 }
5844 }
5845
5846 return !!budget;
9d5c8243
AK
5847}
5848
cd392f5c 5849static inline void igb_rx_checksum(struct igb_ring *ring,
3ceb90fd
AD
5850 union e1000_adv_rx_desc *rx_desc,
5851 struct sk_buff *skb)
9d5c8243 5852{
bc8acf2c 5853 skb_checksum_none_assert(skb);
9d5c8243 5854
294e7d78 5855 /* Ignore Checksum bit is set */
3ceb90fd 5856 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
294e7d78
AD
5857 return;
5858
5859 /* Rx checksum disabled via ethtool */
5860 if (!(ring->netdev->features & NETIF_F_RXCSUM))
9d5c8243 5861 return;
85ad76b2 5862
9d5c8243 5863 /* TCP/UDP checksum error bit is set */
3ceb90fd
AD
5864 if (igb_test_staterr(rx_desc,
5865 E1000_RXDEXT_STATERR_TCPE |
5866 E1000_RXDEXT_STATERR_IPE)) {
b9473560
JB
5867 /*
5868 * work around errata with sctp packets where the TCPE aka
5869 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5870 * packets, (aka let the stack check the crc32c)
5871 */
866cff06
AD
5872 if (!((skb->len == 60) &&
5873 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
12dcd86b 5874 u64_stats_update_begin(&ring->rx_syncp);
04a5fcaa 5875 ring->rx_stats.csum_err++;
12dcd86b
ED
5876 u64_stats_update_end(&ring->rx_syncp);
5877 }
9d5c8243 5878 /* let the stack verify checksum errors */
9d5c8243
AK
5879 return;
5880 }
5881 /* It must be a TCP or UDP packet with a valid checksum */
3ceb90fd
AD
5882 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
5883 E1000_RXD_STAT_UDPCS))
9d5c8243
AK
5884 skb->ip_summed = CHECKSUM_UNNECESSARY;
5885
3ceb90fd
AD
5886 dev_dbg(ring->dev, "cksum success: bits %08X\n",
5887 le32_to_cpu(rx_desc->wb.upper.status_error));
9d5c8243
AK
5888}
5889
077887c3
AD
5890static inline void igb_rx_hash(struct igb_ring *ring,
5891 union e1000_adv_rx_desc *rx_desc,
5892 struct sk_buff *skb)
5893{
5894 if (ring->netdev->features & NETIF_F_RXHASH)
5895 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5896}
5897
8be10e91
AD
5898static void igb_rx_vlan(struct igb_ring *ring,
5899 union e1000_adv_rx_desc *rx_desc,
5900 struct sk_buff *skb)
5901{
5902 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
5903 u16 vid;
5904 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
5905 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
5906 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
5907 else
5908 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
5909
5910 __vlan_hwaccel_put_tag(skb, vid);
5911 }
5912}
5913
44390ca6 5914static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
2d94d8ab
AD
5915{
5916 /* HW will not DMA in data larger than the given buffer, even if it
5917 * parses the (NFS, of course) header to be larger. In that case, it
5918 * fills the header buffer and spills the rest into the page.
5919 */
5920 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5921 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
44390ca6
AD
5922 if (hlen > IGB_RX_HDR_LEN)
5923 hlen = IGB_RX_HDR_LEN;
2d94d8ab
AD
5924 return hlen;
5925}
5926
cd392f5c 5927static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
9d5c8243 5928{
0ba82994 5929 struct igb_ring *rx_ring = q_vector->rx.ring;
16eb8815
AD
5930 union e1000_adv_rx_desc *rx_desc;
5931 const int current_node = numa_node_id();
9d5c8243 5932 unsigned int total_bytes = 0, total_packets = 0;
16eb8815
AD
5933 u16 cleaned_count = igb_desc_unused(rx_ring);
5934 u16 i = rx_ring->next_to_clean;
9d5c8243 5935
60136906 5936 rx_desc = IGB_RX_DESC(rx_ring, i);
9d5c8243 5937
3ceb90fd 5938 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
06034649 5939 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
16eb8815
AD
5940 struct sk_buff *skb = buffer_info->skb;
5941 union e1000_adv_rx_desc *next_rxd;
9d5c8243 5942
69d3ca53 5943 buffer_info->skb = NULL;
16eb8815 5944 prefetch(skb->data);
69d3ca53
AD
5945
5946 i++;
5947 if (i == rx_ring->count)
5948 i = 0;
42d0781a 5949
60136906 5950 next_rxd = IGB_RX_DESC(rx_ring, i);
69d3ca53 5951 prefetch(next_rxd);
9d5c8243 5952
16eb8815
AD
5953 /*
5954 * This memory barrier is needed to keep us from reading
5955 * any other fields out of the rx_desc until we know the
5956 * RXD_STAT_DD bit is set
5957 */
5958 rmb();
9d5c8243 5959
16eb8815
AD
5960 if (!skb_is_nonlinear(skb)) {
5961 __skb_put(skb, igb_get_hlen(rx_desc));
5962 dma_unmap_single(rx_ring->dev, buffer_info->dma,
44390ca6 5963 IGB_RX_HDR_LEN,
59d71989 5964 DMA_FROM_DEVICE);
91615f76 5965 buffer_info->dma = 0;
bf36c1a0
AD
5966 }
5967
16eb8815
AD
5968 if (rx_desc->wb.upper.length) {
5969 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
bf36c1a0 5970
aa913403 5971 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
bf36c1a0
AD
5972 buffer_info->page,
5973 buffer_info->page_offset,
5974 length);
5975
16eb8815
AD
5976 skb->len += length;
5977 skb->data_len += length;
95b9c1df 5978 skb->truesize += PAGE_SIZE / 2;
16eb8815 5979
d1eff350
AD
5980 if ((page_count(buffer_info->page) != 1) ||
5981 (page_to_nid(buffer_info->page) != current_node))
bf36c1a0
AD
5982 buffer_info->page = NULL;
5983 else
5984 get_page(buffer_info->page);
9d5c8243 5985
16eb8815
AD
5986 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
5987 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5988 buffer_info->page_dma = 0;
9d5c8243 5989 }
9d5c8243 5990
3ceb90fd 5991 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
06034649
AD
5992 struct igb_rx_buffer *next_buffer;
5993 next_buffer = &rx_ring->rx_buffer_info[i];
b2d56536
AD
5994 buffer_info->skb = next_buffer->skb;
5995 buffer_info->dma = next_buffer->dma;
5996 next_buffer->skb = skb;
5997 next_buffer->dma = 0;
bf36c1a0
AD
5998 goto next_desc;
5999 }
44390ca6 6000
89eaefb6
BG
6001 if (unlikely((igb_test_staterr(rx_desc,
6002 E1000_RXDEXT_ERR_FRAME_ERR_MASK))
6003 && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
16eb8815 6004 dev_kfree_skb_any(skb);
9d5c8243
AK
6005 goto next_desc;
6006 }
9d5c8243 6007
7ebae817 6008#ifdef CONFIG_IGB_PTP
a79f4f88 6009 igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
3c89f6d0 6010#endif /* CONFIG_IGB_PTP */
077887c3 6011 igb_rx_hash(rx_ring, rx_desc, skb);
3ceb90fd 6012 igb_rx_checksum(rx_ring, rx_desc, skb);
8be10e91 6013 igb_rx_vlan(rx_ring, rx_desc, skb);
3ceb90fd
AD
6014
6015 total_bytes += skb->len;
6016 total_packets++;
6017
6018 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6019
b2cb09b1 6020 napi_gro_receive(&q_vector->napi, skb);
9d5c8243 6021
16eb8815 6022 budget--;
9d5c8243 6023next_desc:
16eb8815
AD
6024 if (!budget)
6025 break;
6026
6027 cleaned_count++;
9d5c8243
AK
6028 /* return some buffers to hardware, one at a time is too slow */
6029 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
cd392f5c 6030 igb_alloc_rx_buffers(rx_ring, cleaned_count);
9d5c8243
AK
6031 cleaned_count = 0;
6032 }
6033
6034 /* use prefetched values */
6035 rx_desc = next_rxd;
9d5c8243 6036 }
bf36c1a0 6037
9d5c8243 6038 rx_ring->next_to_clean = i;
12dcd86b 6039 u64_stats_update_begin(&rx_ring->rx_syncp);
9d5c8243
AK
6040 rx_ring->rx_stats.packets += total_packets;
6041 rx_ring->rx_stats.bytes += total_bytes;
12dcd86b 6042 u64_stats_update_end(&rx_ring->rx_syncp);
0ba82994
AD
6043 q_vector->rx.total_packets += total_packets;
6044 q_vector->rx.total_bytes += total_bytes;
c023cd88
AD
6045
6046 if (cleaned_count)
cd392f5c 6047 igb_alloc_rx_buffers(rx_ring, cleaned_count);
c023cd88 6048
16eb8815 6049 return !!budget;
9d5c8243
AK
6050}
6051
c023cd88 6052static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
06034649 6053 struct igb_rx_buffer *bi)
c023cd88
AD
6054{
6055 struct sk_buff *skb = bi->skb;
6056 dma_addr_t dma = bi->dma;
6057
6058 if (dma)
6059 return true;
6060
6061 if (likely(!skb)) {
6062 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6063 IGB_RX_HDR_LEN);
6064 bi->skb = skb;
6065 if (!skb) {
6066 rx_ring->rx_stats.alloc_failed++;
6067 return false;
6068 }
6069
6070 /* initialize skb for ring */
6071 skb_record_rx_queue(skb, rx_ring->queue_index);
6072 }
6073
6074 dma = dma_map_single(rx_ring->dev, skb->data,
6075 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6076
6077 if (dma_mapping_error(rx_ring->dev, dma)) {
6078 rx_ring->rx_stats.alloc_failed++;
6079 return false;
6080 }
6081
6082 bi->dma = dma;
6083 return true;
6084}
6085
6086static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
06034649 6087 struct igb_rx_buffer *bi)
c023cd88
AD
6088{
6089 struct page *page = bi->page;
6090 dma_addr_t page_dma = bi->page_dma;
6091 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6092
6093 if (page_dma)
6094 return true;
6095
6096 if (!page) {
0614002b 6097 page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
c023cd88
AD
6098 bi->page = page;
6099 if (unlikely(!page)) {
6100 rx_ring->rx_stats.alloc_failed++;
6101 return false;
6102 }
6103 }
6104
6105 page_dma = dma_map_page(rx_ring->dev, page,
6106 page_offset, PAGE_SIZE / 2,
6107 DMA_FROM_DEVICE);
6108
6109 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6110 rx_ring->rx_stats.alloc_failed++;
6111 return false;
6112 }
6113
6114 bi->page_dma = page_dma;
6115 bi->page_offset = page_offset;
6116 return true;
6117}
6118
9d5c8243 6119/**
cd392f5c 6120 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
9d5c8243
AK
 6121 * @rx_ring: rx descriptor ring to refill
 * @cleaned_count: number of buffers to replace
6122 **/
cd392f5c 6123void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
9d5c8243 6124{
9d5c8243 6125 union e1000_adv_rx_desc *rx_desc;
06034649 6126 struct igb_rx_buffer *bi;
c023cd88 6127 u16 i = rx_ring->next_to_use;
9d5c8243 6128
60136906 6129 rx_desc = IGB_RX_DESC(rx_ring, i);
06034649 6130 bi = &rx_ring->rx_buffer_info[i];
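/* as in the Tx clean path, bias i by -count so wrapping is a cheap !i check */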
c023cd88 6131 i -= rx_ring->count;
9d5c8243
AK
6132
6133 while (cleaned_count--) {
c023cd88
AD
6134 if (!igb_alloc_mapped_skb(rx_ring, bi))
6135 break;
9d5c8243 6136
c023cd88
AD
6137 /* Refresh the desc even if buffer_addrs didn't change
6138 * because each write-back erases this info. */
6139 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
9d5c8243 6140
c023cd88
AD
6141 if (!igb_alloc_mapped_page(rx_ring, bi))
6142 break;
6143
6144 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
9d5c8243 6145
c023cd88
AD
6146 rx_desc++;
6147 bi++;
9d5c8243 6148 i++;
c023cd88 6149 if (unlikely(!i)) {
60136906 6150 rx_desc = IGB_RX_DESC(rx_ring, 0);
06034649 6151 bi = rx_ring->rx_buffer_info;
c023cd88
AD
6152 i -= rx_ring->count;
6153 }
6154
6155 /* clear the hdr_addr for the next_to_use descriptor */
6156 rx_desc->read.hdr_addr = 0;
9d5c8243
AK
6157 }
6158
c023cd88
AD
6159 i += rx_ring->count;
6160
9d5c8243
AK
6161 if (rx_ring->next_to_use != i) {
6162 rx_ring->next_to_use = i;
9d5c8243
AK
6163
6164 /* Force memory writes to complete before letting h/w
6165 * know there are new descriptors to fetch. (Only
6166 * applicable for weak-ordered memory model archs,
6167 * such as IA-64). */
6168 wmb();
fce99e34 6169 writel(i, rx_ring->tail);
9d5c8243
AK
6170 }
6171}
6172
6173/**
 6174 * igb_mii_ioctl - read PHY registers on behalf of the MII ioctls
 6175 * @netdev: network interface device structure
 6176 * @ifr: ifreq structure carrying the mii_ioctl_data
 6177 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
6178 **/
6179static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6180{
6181 struct igb_adapter *adapter = netdev_priv(netdev);
6182 struct mii_ioctl_data *data = if_mii(ifr);
6183
6184 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6185 return -EOPNOTSUPP;
6186
6187 switch (cmd) {
6188 case SIOCGMIIPHY:
6189 data->phy_id = adapter->hw.phy.addr;
6190 break;
6191 case SIOCGMIIREG:
f5f4cf08
AD
6192 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6193 &data->val_out))
9d5c8243
AK
6194 return -EIO;
6195 break;
6196 case SIOCSMIIREG:
6197 default:
6198 return -EOPNOTSUPP;
6199 }
6200 return 0;
6201}
6202
6203/**
 6204 * igb_ioctl - dispatch device specific ioctls
 6205 * @netdev: network interface device structure
 6206 * @ifr: ifreq structure for the ioctl
 6207 * @cmd: ioctl command
6208 **/
6209static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6210{
6211 switch (cmd) {
6212 case SIOCGMIIPHY:
6213 case SIOCGMIIREG:
6214 case SIOCSMIIREG:
6215 return igb_mii_ioctl(netdev, ifr, cmd);
3c89f6d0 6216#ifdef CONFIG_IGB_PTP
c6cb090b 6217 case SIOCSHWTSTAMP:
a79f4f88 6218 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
3c89f6d0 6219#endif /* CONFIG_IGB_PTP */
9d5c8243
AK
6220 default:
6221 return -EOPNOTSUPP;
6222 }
6223}
6224
009bc06e
AD
6225s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6226{
6227 struct igb_adapter *adapter = hw->back;
6228 u16 cap_offset;
6229
bdaae04c 6230 cap_offset = adapter->pdev->pcie_cap;
009bc06e
AD
6231 if (!cap_offset)
6232 return -E1000_ERR_CONFIG;
6233
6234 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6235
6236 return 0;
6237}
6238
6239s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6240{
6241 struct igb_adapter *adapter = hw->back;
6242 u16 cap_offset;
6243
bdaae04c 6244 cap_offset = adapter->pdev->pcie_cap;
009bc06e
AD
6245 if (!cap_offset)
6246 return -E1000_ERR_CONFIG;
6247
6248 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6249
6250 return 0;
6251}
6252
c8f44aff 6253static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
9d5c8243
AK
6254{
6255 struct igb_adapter *adapter = netdev_priv(netdev);
6256 struct e1000_hw *hw = &adapter->hw;
6257 u32 ctrl, rctl;
5faf030c 6258 bool enable = !!(features & NETIF_F_HW_VLAN_RX);
9d5c8243 6259
5faf030c 6260 if (enable) {
9d5c8243
AK
6261 /* enable VLAN tag insert/strip */
6262 ctrl = rd32(E1000_CTRL);
6263 ctrl |= E1000_CTRL_VME;
6264 wr32(E1000_CTRL, ctrl);
6265
51466239 6266 /* Disable CFI check */
9d5c8243 6267 rctl = rd32(E1000_RCTL);
9d5c8243
AK
6268 rctl &= ~E1000_RCTL_CFIEN;
6269 wr32(E1000_RCTL, rctl);
9d5c8243
AK
6270 } else {
6271 /* disable VLAN tag insert/strip */
6272 ctrl = rd32(E1000_CTRL);
6273 ctrl &= ~E1000_CTRL_VME;
6274 wr32(E1000_CTRL, ctrl);
9d5c8243
AK
6275 }
6276
e1739522 6277 igb_rlpml_set(adapter);
9d5c8243
AK
6278}
6279
8e586137 6280static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
9d5c8243
AK
6281{
6282 struct igb_adapter *adapter = netdev_priv(netdev);
6283 struct e1000_hw *hw = &adapter->hw;
4ae196df 6284 int pf_id = adapter->vfs_allocated_count;
9d5c8243 6285
51466239
AD
6286 /* attempt to add filter to vlvf array */
6287 igb_vlvf_set(adapter, vid, true, pf_id);
4ae196df 6288
51466239
AD
6289 /* add the filter since PF can receive vlans w/o entry in vlvf */
6290 igb_vfta_set(hw, vid, true);
b2cb09b1
JP
6291
6292 set_bit(vid, adapter->active_vlans);
8e586137
JP
6293
6294 return 0;
9d5c8243
AK
6295}
6296
8e586137 6297static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
9d5c8243
AK
6298{
6299 struct igb_adapter *adapter = netdev_priv(netdev);
6300 struct e1000_hw *hw = &adapter->hw;
4ae196df 6301 int pf_id = adapter->vfs_allocated_count;
51466239 6302 s32 err;
9d5c8243 6303
51466239
AD
6304 /* remove vlan from VLVF table array */
6305 err = igb_vlvf_set(adapter, vid, false, pf_id);
9d5c8243 6306
51466239
AD
6307 /* if vid was not present in VLVF just remove it from table */
6308 if (err)
4ae196df 6309 igb_vfta_set(hw, vid, false);
b2cb09b1
JP
6310
6311 clear_bit(vid, adapter->active_vlans);
8e586137
JP
6312
6313 return 0;
9d5c8243
AK
6314}
6315
6316static void igb_restore_vlan(struct igb_adapter *adapter)
6317{
b2cb09b1 6318 u16 vid;
9d5c8243 6319
5faf030c
AD
6320 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6321
b2cb09b1
JP
6322 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6323 igb_vlan_rx_add_vid(adapter->netdev, vid);
9d5c8243
AK
6324}
6325
14ad2513 6326int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
9d5c8243 6327{
090b1795 6328 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
6329 struct e1000_mac_info *mac = &adapter->hw.mac;
6330
6331 mac->autoneg = 0;
6332
14ad2513
DD
6333 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6334 * for the switch() below to work */
6335 if ((spd & 1) || (dplx & ~1))
6336 goto err_inval;
6337
cd2638a8
CW
 6338 /* Fiber NICs only allow 1000 Mbps full duplex */
6339 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
14ad2513
DD
 6340 (spd != SPEED_1000 ||
 6341 dplx != DUPLEX_FULL))
6342 goto err_inval;
cd2638a8 6343
14ad2513 6344 switch (spd + dplx) {
9d5c8243
AK
6345 case SPEED_10 + DUPLEX_HALF:
6346 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6347 break;
6348 case SPEED_10 + DUPLEX_FULL:
6349 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6350 break;
6351 case SPEED_100 + DUPLEX_HALF:
6352 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6353 break;
6354 case SPEED_100 + DUPLEX_FULL:
6355 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6356 break;
6357 case SPEED_1000 + DUPLEX_FULL:
6358 mac->autoneg = 1;
6359 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6360 break;
6361 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6362 default:
14ad2513 6363 goto err_inval;
9d5c8243 6364 }
8376dad0
JB
6365
6366 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
6367 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6368
9d5c8243 6369 return 0;
14ad2513
DD
6370
6371err_inval:
6372 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6373 return -EINVAL;
9d5c8243
AK
6374}
6375
749ab2cd
YZ
6376static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
6377 bool runtime)
9d5c8243
AK
6378{
6379 struct net_device *netdev = pci_get_drvdata(pdev);
6380 struct igb_adapter *adapter = netdev_priv(netdev);
6381 struct e1000_hw *hw = &adapter->hw;
2d064c06 6382 u32 ctrl, rctl, status;
749ab2cd 6383 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
9d5c8243
AK
6384#ifdef CONFIG_PM
6385 int retval = 0;
6386#endif
6387
6388 netif_device_detach(netdev);
6389
a88f10ec 6390 if (netif_running(netdev))
749ab2cd 6391 __igb_close(netdev, true);
a88f10ec 6392
047e0030 6393 igb_clear_interrupt_scheme(adapter);
9d5c8243
AK
6394
6395#ifdef CONFIG_PM
6396 retval = pci_save_state(pdev);
6397 if (retval)
6398 return retval;
6399#endif
6400
6401 status = rd32(E1000_STATUS);
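/* if the link is already up, do not arm wake on link status change */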
6402 if (status & E1000_STATUS_LU)
6403 wufc &= ~E1000_WUFC_LNKC;
6404
6405 if (wufc) {
6406 igb_setup_rctl(adapter);
ff41f8dc 6407 igb_set_rx_mode(netdev);
9d5c8243
AK
6408
6409 /* turn on all-multi mode if wake on multicast is enabled */
6410 if (wufc & E1000_WUFC_MC) {
6411 rctl = rd32(E1000_RCTL);
6412 rctl |= E1000_RCTL_MPE;
6413 wr32(E1000_RCTL, rctl);
6414 }
6415
6416 ctrl = rd32(E1000_CTRL);
6417 /* advertise wake from D3Cold */
6418 #define E1000_CTRL_ADVD3WUC 0x00100000
6419 /* phy power management enable */
6420 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6421 ctrl |= E1000_CTRL_ADVD3WUC;
6422 wr32(E1000_CTRL, ctrl);
6423
9d5c8243 6424 /* Allow time for pending master requests to run */
330a6d6a 6425 igb_disable_pcie_master(hw);
9d5c8243
AK
6426
6427 wr32(E1000_WUC, E1000_WUC_PME_EN);
6428 wr32(E1000_WUFC, wufc);
9d5c8243
AK
6429 } else {
6430 wr32(E1000_WUC, 0);
6431 wr32(E1000_WUFC, 0);
9d5c8243
AK
6432 }
6433
3fe7c4c9
RW
6434 *enable_wake = wufc || adapter->en_mng_pt;
6435 if (!*enable_wake)
88a268c1
NN
6436 igb_power_down_link(adapter);
6437 else
6438 igb_power_up_link(adapter);
9d5c8243
AK
6439
6440 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6441 * would have already happened in close and is redundant. */
6442 igb_release_hw_control(adapter);
6443
6444 pci_disable_device(pdev);
6445
9d5c8243
AK
6446 return 0;
6447}
6448
6449#ifdef CONFIG_PM
d9dd966d 6450#ifdef CONFIG_PM_SLEEP
749ab2cd 6451static int igb_suspend(struct device *dev)
3fe7c4c9
RW
6452{
6453 int retval;
6454 bool wake;
749ab2cd 6455 struct pci_dev *pdev = to_pci_dev(dev);
3fe7c4c9 6456
749ab2cd 6457 retval = __igb_shutdown(pdev, &wake, 0);
3fe7c4c9
RW
6458 if (retval)
6459 return retval;
6460
6461 if (wake) {
6462 pci_prepare_to_sleep(pdev);
6463 } else {
6464 pci_wake_from_d3(pdev, false);
6465 pci_set_power_state(pdev, PCI_D3hot);
6466 }
6467
6468 return 0;
6469}
d9dd966d 6470#endif /* CONFIG_PM_SLEEP */
3fe7c4c9 6471
749ab2cd 6472static int igb_resume(struct device *dev)
9d5c8243 6473{
749ab2cd 6474 struct pci_dev *pdev = to_pci_dev(dev);
9d5c8243
AK
6475 struct net_device *netdev = pci_get_drvdata(pdev);
6476 struct igb_adapter *adapter = netdev_priv(netdev);
6477 struct e1000_hw *hw = &adapter->hw;
6478 u32 err;
6479
6480 pci_set_power_state(pdev, PCI_D0);
6481 pci_restore_state(pdev);
b94f2d77 6482 pci_save_state(pdev);
42bfd33a 6483
aed5dec3 6484 err = pci_enable_device_mem(pdev);
9d5c8243
AK
6485 if (err) {
6486 dev_err(&pdev->dev,
6487 "igb: Cannot enable PCI device from suspend\n");
6488 return err;
6489 }
6490 pci_set_master(pdev);
6491
6492 pci_enable_wake(pdev, PCI_D3hot, 0);
6493 pci_enable_wake(pdev, PCI_D3cold, 0);
6494
cfb8c3aa 6495 if (igb_init_interrupt_scheme(adapter)) {
a88f10ec
AD
6496 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6497 return -ENOMEM;
9d5c8243
AK
6498 }
6499
9d5c8243 6500 igb_reset(adapter);
a8564f03
AD
6501
6502 /* let the f/w know that the h/w is now under the control of the
6503 * driver. */
6504 igb_get_hw_control(adapter);
6505
9d5c8243
AK
6506 wr32(E1000_WUS, ~0);
6507
749ab2cd
YZ
6508 if (netdev->flags & IFF_UP) {
6509 err = __igb_open(netdev, true);
a88f10ec
AD
6510 if (err)
6511 return err;
6512 }
9d5c8243
AK
6513
6514 netif_device_attach(netdev);
749ab2cd
YZ
6515 return 0;
6516}
6517
6518#ifdef CONFIG_PM_RUNTIME
6519static int igb_runtime_idle(struct device *dev)
6520{
6521 struct pci_dev *pdev = to_pci_dev(dev);
6522 struct net_device *netdev = pci_get_drvdata(pdev);
6523 struct igb_adapter *adapter = netdev_priv(netdev);
6524
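/* no link: queue a delayed runtime suspend; the -EBUSY below keeps
 * the device active until that timer fires */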
6525 if (!igb_has_link(adapter))
6526 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
6527
6528 return -EBUSY;
6529}
6530
6531static int igb_runtime_suspend(struct device *dev)
6532{
6533 struct pci_dev *pdev = to_pci_dev(dev);
6534 int retval;
6535 bool wake;
6536
6537 retval = __igb_shutdown(pdev, &wake, 1);
6538 if (retval)
6539 return retval;
6540
6541 if (wake) {
6542 pci_prepare_to_sleep(pdev);
6543 } else {
6544 pci_wake_from_d3(pdev, false);
6545 pci_set_power_state(pdev, PCI_D3hot);
6546 }
9d5c8243 6547
9d5c8243
AK
6548 return 0;
6549}
749ab2cd
YZ
6550
6551static int igb_runtime_resume(struct device *dev)
6552{
6553 return igb_resume(dev);
6554}
6555#endif /* CONFIG_PM_RUNTIME */
9d5c8243
AK
6556#endif
6557
6558static void igb_shutdown(struct pci_dev *pdev)
6559{
3fe7c4c9
RW
6560 bool wake;
6561
749ab2cd 6562 __igb_shutdown(pdev, &wake, 0);
3fe7c4c9
RW
6563
6564 if (system_state == SYSTEM_POWER_OFF) {
6565 pci_wake_from_d3(pdev, wake);
6566 pci_set_power_state(pdev, PCI_D3hot);
6567 }
9d5c8243
AK
6568}
6569
6570#ifdef CONFIG_NET_POLL_CONTROLLER
6571/*
6572 * Polling 'interrupt' - used by things like netconsole to send skbs
6573 * without having to re-enable interrupts. It's not called while
6574 * the interrupt routine is executing.
6575 */
6576static void igb_netpoll(struct net_device *netdev)
6577{
6578 struct igb_adapter *adapter = netdev_priv(netdev);
eebbbdba 6579 struct e1000_hw *hw = &adapter->hw;
0d1ae7f4 6580 struct igb_q_vector *q_vector;
9d5c8243 6581 int i;
9d5c8243 6582
047e0030 6583 for (i = 0; i < adapter->num_q_vectors; i++) {
0d1ae7f4
AD
6584 q_vector = adapter->q_vector[i];
6585 if (adapter->msix_entries)
6586 wr32(E1000_EIMC, q_vector->eims_value);
6587 else
6588 igb_irq_disable(adapter);
047e0030 6589 napi_schedule(&q_vector->napi);
eebbbdba 6590 }
9d5c8243
AK
6591}
6592#endif /* CONFIG_NET_POLL_CONTROLLER */
6593
6594/**
6595 * igb_io_error_detected - called when PCI error is detected
6596 * @pdev: Pointer to PCI device
6597 * @state: The current pci connection state
6598 *
6599 * This function is called after a PCI bus error affecting
6600 * this device has been detected.
6601 */
6602static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6603 pci_channel_state_t state)
6604{
6605 struct net_device *netdev = pci_get_drvdata(pdev);
6606 struct igb_adapter *adapter = netdev_priv(netdev);
6607
6608 netif_device_detach(netdev);
6609
59ed6eec
AD
6610 if (state == pci_channel_io_perm_failure)
6611 return PCI_ERS_RESULT_DISCONNECT;
6612
9d5c8243
AK
6613 if (netif_running(netdev))
6614 igb_down(adapter);
6615 pci_disable_device(pdev);
6616
 6617 /* Request a slot reset. */
6618 return PCI_ERS_RESULT_NEED_RESET;
6619}
6620
6621/**
6622 * igb_io_slot_reset - called after the pci bus has been reset.
6623 * @pdev: Pointer to PCI device
6624 *
6625 * Restart the card from scratch, as if from a cold-boot. Implementation
6626 * resembles the first-half of the igb_resume routine.
6627 */
6628static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6629{
6630 struct net_device *netdev = pci_get_drvdata(pdev);
6631 struct igb_adapter *adapter = netdev_priv(netdev);
6632 struct e1000_hw *hw = &adapter->hw;
40a914fa 6633 pci_ers_result_t result;
42bfd33a 6634 int err;
9d5c8243 6635
aed5dec3 6636 if (pci_enable_device_mem(pdev)) {
9d5c8243
AK
6637 dev_err(&pdev->dev,
6638 "Cannot re-enable PCI device after reset.\n");
40a914fa
AD
6639 result = PCI_ERS_RESULT_DISCONNECT;
6640 } else {
6641 pci_set_master(pdev);
6642 pci_restore_state(pdev);
b94f2d77 6643 pci_save_state(pdev);
9d5c8243 6644
40a914fa
AD
6645 pci_enable_wake(pdev, PCI_D3hot, 0);
6646 pci_enable_wake(pdev, PCI_D3cold, 0);
9d5c8243 6647
40a914fa
AD
6648 igb_reset(adapter);
6649 wr32(E1000_WUS, ~0);
6650 result = PCI_ERS_RESULT_RECOVERED;
6651 }
9d5c8243 6652
ea943d41
JK
6653 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6654 if (err) {
6655 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6656 "failed 0x%0x\n", err);
6657 /* non-fatal, continue */
6658 }
40a914fa
AD
6659
6660 return result;
9d5c8243
AK
6661}
6662
6663/**
6664 * igb_io_resume - called when traffic can start flowing again.
6665 * @pdev: Pointer to PCI device
6666 *
6667 * This callback is called when the error recovery driver tells us that
6668 * its OK to resume normal operation. Implementation resembles the
6669 * second-half of the igb_resume routine.
6670 */
6671static void igb_io_resume(struct pci_dev *pdev)
6672{
6673 struct net_device *netdev = pci_get_drvdata(pdev);
6674 struct igb_adapter *adapter = netdev_priv(netdev);
6675
9d5c8243
AK
6676 if (netif_running(netdev)) {
6677 if (igb_up(adapter)) {
6678 dev_err(&pdev->dev, "igb_up failed after reset\n");
6679 return;
6680 }
6681 }
6682
6683 netif_device_attach(netdev);
6684
6685 /* let the f/w know that the h/w is now under the control of the
6686 * driver. */
6687 igb_get_hw_control(adapter);
9d5c8243
AK
6688}
6689
26ad9178
AD
6690static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6691 u8 qsel)
6692{
6693 u32 rar_low, rar_high;
6694 struct e1000_hw *hw = &adapter->hw;
6695
6696 /* HW expects these in little endian so we reverse the byte order
6697 * from network order (big endian) to little endian
6698 */
6699 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6700 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6701 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6702
6703 /* Indicate to hardware the Address is Valid. */
6704 rar_high |= E1000_RAH_AV;
6705
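/* 82575 appears to encode the queue select as a pool number while later
 * MACs use one bit per pool, hence multiply vs. shift (inferred from the
 * two branches below, not verified against the datasheet) */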
6706 if (hw->mac.type == e1000_82575)
6707 rar_high |= E1000_RAH_POOL_1 * qsel;
6708 else
6709 rar_high |= E1000_RAH_POOL_1 << qsel;
6710
6711 wr32(E1000_RAL(index), rar_low);
6712 wrfl();
6713 wr32(E1000_RAH(index), rar_high);
6714 wrfl();
6715}
6716
4ae196df
AD
6717static int igb_set_vf_mac(struct igb_adapter *adapter,
6718 int vf, unsigned char *mac_addr)
6719{
6720 struct e1000_hw *hw = &adapter->hw;
ff41f8dc
AD
6721	/* VF MAC addresses start at the end of the receive address registers
6722	 * and move towards the first, so a collision should not be possible */
6723 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
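	/* e.g. with a hypothetical rar_entry_count of 24, VF 0 lands in
	 * RAR entry 23, VF 1 in entry 22, and so on downwards. */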
4ae196df 6724
37680117 6725 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
4ae196df 6726
26ad9178 6727 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
4ae196df
AD
6728
6729 return 0;
6730}
6731
8151d294
WM
6732static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6733{
6734 struct igb_adapter *adapter = netdev_priv(netdev);
6735 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6736 return -EINVAL;
6737 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6738 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6739 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6740 " change effective.");
6741 if (test_bit(__IGB_DOWN, &adapter->state)) {
6742 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6743 " but the PF device is not up.\n");
6744 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6745 " attempting to use the VF device.\n");
6746 }
6747 return igb_set_vf_mac(adapter, vf, mac);
6748}
6749
17dc566c
LL
6750static int igb_link_mbps(int internal_link_speed)
6751{
6752 switch (internal_link_speed) {
6753 case SPEED_100:
6754 return 100;
6755 case SPEED_1000:
6756 return 1000;
6757 default:
6758 return 0;
6759 }
6760}
6761
6762static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6763 int link_speed)
6764{
6765 int rf_dec, rf_int;
6766 u32 bcnrc_val;
6767
6768 if (tx_rate != 0) {
6769 /* Calculate the rate factor values to set */
6770 rf_int = link_speed / tx_rate;
6771 rf_dec = (link_speed - (rf_int * tx_rate));
6772 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
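		/* Worked example (assuming E1000_RTTBCNRC_RF_INT_SHIFT == 14):
		 * link_speed = 1000, tx_rate = 300
		 *	rf_int = 1000 / 300              = 3
		 *	rf_dec = (100 * (1 << 14)) / 300 = 5461
		 * so the programmed factor is 3 + 5461/16384 ~= 3.33 = link/rate.
		 */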
6773
6774 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6775 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6776 E1000_RTTBCNRC_RF_INT_MASK);
6777 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6778 } else {
6779 bcnrc_val = 0;
6780 }
6781
6782 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
f00b0da7
LL
6783 /*
6784 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
6785 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
6786 */
6787 wr32(E1000_RTTBCNRM, 0x14);
17dc566c
LL
6788 wr32(E1000_RTTBCNRC, bcnrc_val);
6789}
6790
6791static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6792{
6793 int actual_link_speed, i;
6794 bool reset_rate = false;
6795
6796 /* VF TX rate limit was not set or not supported */
6797 if ((adapter->vf_rate_link_speed == 0) ||
6798 (adapter->hw.mac.type != e1000_82576))
6799 return;
6800
6801 actual_link_speed = igb_link_mbps(adapter->link_speed);
6802 if (actual_link_speed != adapter->vf_rate_link_speed) {
6803 reset_rate = true;
6804 adapter->vf_rate_link_speed = 0;
6805 dev_info(&adapter->pdev->dev,
6806 "Link speed has been changed. VF Transmit "
6807 "rate is disabled\n");
6808 }
6809
6810 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6811 if (reset_rate)
6812 adapter->vf_data[i].tx_rate = 0;
6813
6814 igb_set_vf_rate_limit(&adapter->hw, i,
6815 adapter->vf_data[i].tx_rate,
6816 actual_link_speed);
6817 }
6818}
6819
8151d294
WM
6820static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6821{
17dc566c
LL
6822 struct igb_adapter *adapter = netdev_priv(netdev);
6823 struct e1000_hw *hw = &adapter->hw;
6824 int actual_link_speed;
6825
6826 if (hw->mac.type != e1000_82576)
6827 return -EOPNOTSUPP;
6828
6829 actual_link_speed = igb_link_mbps(adapter->link_speed);
6830 if ((vf >= adapter->vfs_allocated_count) ||
6831 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6832 (tx_rate < 0) || (tx_rate > actual_link_speed))
6833 return -EINVAL;
6834
6835 adapter->vf_rate_link_speed = actual_link_speed;
6836 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6837 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6838
6839 return 0;
8151d294
WM
6840}
6841
6842static int igb_ndo_get_vf_config(struct net_device *netdev,
6843 int vf, struct ifla_vf_info *ivi)
6844{
6845 struct igb_adapter *adapter = netdev_priv(netdev);
6846 if (vf >= adapter->vfs_allocated_count)
6847 return -EINVAL;
6848 ivi->vf = vf;
6849 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
17dc566c 6850 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
8151d294
WM
6851 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6852 ivi->qos = adapter->vf_data[vf].pf_qos;
6853 return 0;
6854}
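/* These igb_ndo_* helpers are exposed to the networking stack through
 * net_device_ops; a minimal sketch of that hookup (the real igb_netdev_ops
 * table is defined earlier in this file):
 *
 *	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
 *	.ndo_set_vf_tx_rate	= igb_ndo_set_vf_bw,
 *	.ndo_get_vf_config	= igb_ndo_get_vf_config,
 */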
6855
4ae196df
AD
6856static void igb_vmm_control(struct igb_adapter *adapter)
6857{
6858 struct e1000_hw *hw = &adapter->hw;
10d8e907 6859 u32 reg;
4ae196df 6860
52a1dd4d
AD
6861 switch (hw->mac.type) {
6862 case e1000_82575:
f96a8a0b
CW
6863 case e1000_i210:
6864 case e1000_i211:
52a1dd4d
AD
6865 default:
6866 /* replication is not supported for 82575 */
4ae196df 6867 return;
52a1dd4d
AD
6868 case e1000_82576:
6869 /* notify HW that the MAC is adding vlan tags */
6870 reg = rd32(E1000_DTXCTL);
6871 reg |= E1000_DTXCTL_VLAN_ADDED;
6872 wr32(E1000_DTXCTL, reg);
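		/* fall through - 82576 also gets the STRVLAN setting below */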
6873 case e1000_82580:
6874 /* enable replication vlan tag stripping */
6875 reg = rd32(E1000_RPLOLR);
6876 reg |= E1000_RPLOLR_STRVLAN;
6877 wr32(E1000_RPLOLR, reg);
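		/* fall through */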
d2ba2ed8
AD
6878 case e1000_i350:
6879 /* none of the above registers are supported by i350 */
52a1dd4d
AD
6880 break;
6881 }
10d8e907 6882
d4960307
AD
6883 if (adapter->vfs_allocated_count) {
6884 igb_vmdq_set_loopback_pf(hw, true);
6885 igb_vmdq_set_replication_pf(hw, true);
13800469
GR
6886 igb_vmdq_set_anti_spoofing_pf(hw, true,
6887 adapter->vfs_allocated_count);
d4960307
AD
6888 } else {
6889 igb_vmdq_set_loopback_pf(hw, false);
6890 igb_vmdq_set_replication_pf(hw, false);
6891 }
4ae196df
AD
6892}
6893
b6e0c419
CW
6894static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
6895{
6896 struct e1000_hw *hw = &adapter->hw;
6897 u32 dmac_thr;
6898 u16 hwm;
6899
6900 if (hw->mac.type > e1000_82580) {
6901 if (adapter->flags & IGB_FLAG_DMAC) {
6902 u32 reg;
6903
6904 /* force threshold to 0. */
6905 wr32(E1000_DMCTXTH, 0);
6906
6907 /*
e8c626e9
MV
6908 * DMA Coalescing high water mark needs to be greater
6909 * than the Rx threshold. Set hwm to PBA - max frame
6910	 * size in 16B units, but no lower than PBA - 6KB.
b6e0c419 6911 */
e8c626e9
MV
6912 hwm = 64 * pba - adapter->max_frame_size / 16;
6913 if (hwm < 64 * (pba - 6))
6914 hwm = 64 * (pba - 6);
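			/* Example (assuming pba is in KB and max_frame_size = 1522):
			 *	pba = 34  ->  hwm = 64 * 34 - 1522 / 16 = 2176 - 95 = 2081
			 * which is above the 64 * (34 - 6) = 1792 floor, so it is kept.
			 */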
6915 reg = rd32(E1000_FCRTC);
6916 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
6917 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
6918 & E1000_FCRTC_RTH_COAL_MASK);
6919 wr32(E1000_FCRTC, reg);
6920
6921 /*
6922 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
6923	 * frame size, but no lower than PBA - 10KB.
6924 */
6925 dmac_thr = pba - adapter->max_frame_size / 512;
6926 if (dmac_thr < pba - 10)
6927 dmac_thr = pba - 10;
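			/* Example (same assumptions, pba in KB, max_frame_size = 1522):
			 *	dmac_thr = 34 - 1522 / 512 = 34 - 2 = 32
			 * which is above the pba - 10 = 24 floor.
			 */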
b6e0c419
CW
6928 reg = rd32(E1000_DMACR);
6929 reg &= ~E1000_DMACR_DMACTHR_MASK;
b6e0c419
CW
6930 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
6931 & E1000_DMACR_DMACTHR_MASK);
6932
6933		/* transition to L0s or L1 if available */
6934 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
6935
6936		/* watchdog timer = +/-1000 usec in 32 usec intervals */
6937 reg |= (1000 >> 5);
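			/* 1000 >> 5 == 31, i.e. ~1 ms expressed in 32 usec units */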
0c02dd98
MV
6938
6939 /* Disable BMC-to-OS Watchdog Enable */
6940 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
b6e0c419
CW
6941 wr32(E1000_DMACR, reg);
6942
6943 /*
6944			 * no lower threshold to disable
6945			 * coalescing (smart fifo) - UTRESH=0
6946 */
6947 wr32(E1000_DMCRTRH, 0);
b6e0c419
CW
6948
6949 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
6950
6951 wr32(E1000_DMCTLX, reg);
6952
6953 /*
6954 * free space in tx packet buffer to wake from
6955 * DMA coal
6956 */
6957 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
6958 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
6959
6960 /*
6961 * make low power state decision controlled
6962 * by DMA coal
6963 */
6964 reg = rd32(E1000_PCIEMISC);
6965 reg &= ~E1000_PCIEMISC_LX_DECISION;
6966 wr32(E1000_PCIEMISC, reg);
6967 } /* endif adapter->dmac is not disabled */
6968 } else if (hw->mac.type == e1000_82580) {
6969 u32 reg = rd32(E1000_PCIEMISC);
6970 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
6971 wr32(E1000_DMACR, 0);
6972 }
6973}
6974
9d5c8243 6975/* igb_main.c */