/* drivers/net/ethernet/intel/igb/igb_main.c */
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 4
#define MIN 0
#define BUILD 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";

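/*
 * Worked example of the version macros above: with MAJ = 4, MIN = 0 and
 * BUILD = 1, the __stringify() expansions paste the numbers together at
 * preprocessing time, so DRV_VERSION evaluates to the literal "4.0.1-k"
 * and that is what igb_driver_version reports.
 */
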
static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN    |
	 *   +--------------------------------------------------------------+
	 *   63      46 45   40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN | RSV|Packet|  RSS   |
	 *   | Checksum   Ident  |   |         |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->skb) {
					print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS,
						  16, 1, buffer_info->skb->data,
						  IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS,
						  16, 1,
						  page_address(buffer_info->page) +
						  buffer_info->page_offset,
						  PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

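/*
 * Worked example of the 82576 mapping above: Q_IDX_82576(i) interleaves the
 * PF's RSS rings into the slots the VF pairs leave free.  With 2 VFs
 * (rbase_offset = 2) the PF rings land at
 *   i = 0 -> reg_idx 2 + Q_IDX_82576(0) = 2
 *   i = 1 -> reg_idx 2 + Q_IDX_82576(1) = 10
 *   i = 2 -> reg_idx 2 + Q_IDX_82576(2) = 3
 *   i = 3 -> reg_idx 2 + Q_IDX_82576(3) = 11
 * while VF 0 keeps hardware queues 0/8 and VF 1 keeps 1/9, so nothing
 * collides.
 */
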
static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/*
		 * On i350, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset of in IVAR, should be multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

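/*
 * Example of how igb_write_ivar() is driven by the 82576 layout used in
 * igb_assign_vector() below: an Rx queue with reg_idx 10 gives
 * index = 10 & 0x7 = 2 and offset = (10 & 0x8) << 1 = 16, so the vector
 * number plus E1000_IVAR_VALID land in bits 23:16 of IVAR0 entry 2; the
 * Tx queue with the same reg_idx uses the same row at offset + 8.
 */
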
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

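/*
 * On the 82575 path above the assignment is a plain bitmask rather than a
 * table lookup: for example, rx_queue 2 and tx_queue 2 on one vector set
 * msixbm = (E1000_EICR_RX_QUEUE0 << 2) | (E1000_EICR_TX_QUEUE0 << 2), and
 * that same value is written to MSIXBM(vector) and kept as
 * q_vector->eims_value for later EIMS/EIMC masking.
 */
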
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	rtnl_lock();
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	err = netif_set_real_num_rx_queues(adapter->netdev,
					   adapter->num_rx_queues);
	rtnl_unlock();
	return err;
}

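/*
 * Example of the vector accounting above: with rss_queues = 4, no VFs and
 * IGB_FLAG_QUEUE_PAIRS set, Rx and Tx share vectors, so num_q_vectors = 4
 * and pci_enable_msix() is asked for 5 entries (4 queue vectors plus the
 * link/other vector).  If that allocation fails the driver drops back to a
 * single MSI (or legacy) vector with one Rx and one Tx queue.
 */
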
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

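/*
 * Example of the two branches above: with 4 Rx and 4 Tx queues but only
 * 4 q_vectors (queue pairs), vector i gets both tx_ring[i] and rx_ring[i];
 * with 8 q_vectors every ring gets a vector of its own, Rx rings first and
 * Tx rings after them.
 */
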
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}


	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	igb_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_disable(&(adapter->q_vector[i]->napi));

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect, CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits hold the Tx packet buffer allocation in KB */
		tx_space = pba >> 16;
		/* lower 16 bits hold the Rx packet buffer allocation in KB */
		pba &= 0xffff;
		/* the Tx FIFO also stores 16 bytes of information about each
		 * Tx packet, but don't count the Ethernet FCS because the
		 * hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on Rx space, Rx wins and must trump the
			 * Tx adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}
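	/* Worked example (illustrative, not part of the original source):
	 * with a 9000-byte MTU, igb_sw_init() sets max_frame_size to
	 * 9000 + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN = 9022 bytes, so
	 *   min_tx_space = ALIGN((9022 + 16 - 4) * 2, 1024) >> 10 = 18 KB
	 *   min_rx_space = ALIGN(9022, 1024) >> 10             =  9 KB
	 * (16 being sizeof(union e1000_adv_tx_desc)).  If the Tx half of
	 * the PBA is below 18 KB, the shortfall is taken from the Rx half,
	 * which is never shrunk below 9 KB.
	 */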

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
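	/* Worked example (illustrative, not part of the original source):
	 * for an 82575/i210/i211 with the default 34 KB PBA and a 1500-byte
	 * MTU (max_frame_size = 1522):
	 *   hwm = min(34816 * 9 / 10, 34816 - 2 * 1522)
	 *       = min(31334, 31772) = 31334
	 *   fc->high_water = 31334 & 0xFFF0 = 31328
	 *   fc->low_water  = 31328 - 16     = 31312
	 */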
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	/*
	 * Flow control settings reset on hardware reset, so guarantee flow
	 * control is off when forcing speed.
	 */
	if (!hw->mac.autoneg)
		igb_force_mac_fc(hw);

	igb_init_dmac(adapter, pba);
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

#ifdef CONFIG_IGB_PTP
	/* Re-enable PTP, where applicable. */
	igb_ptp_reset(adapter);
#endif /* CONFIG_IGB_PTP */

	igb_get_phy_info(hw);
}

static netdev_features_t igb_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate Rx/Tx VLAN accel
	 * enable/disable, make sure the Tx flag is always in the same
	 * state as the Rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}
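/* Example (illustrative, not part of the original source): if userspace
 * turns Rx VLAN acceleration off while Tx VLAN acceleration is still on,
 * the mask returned above also clears NETIF_F_HW_VLAN_TX, so the two
 * offloads always toggle together on this hardware.
 */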
1775
c8f44aff
MM
1776static int igb_set_features(struct net_device *netdev,
1777 netdev_features_t features)
ac52caa3 1778{
c8f44aff 1779 netdev_features_t changed = netdev->features ^ features;
89eaefb6 1780 struct igb_adapter *adapter = netdev_priv(netdev);
ac52caa3 1781
b2cb09b1
JP
1782 if (changed & NETIF_F_HW_VLAN_RX)
1783 igb_vlan_mode(netdev, features);
1784
89eaefb6
BG
1785 if (!(changed & NETIF_F_RXALL))
1786 return 0;
1787
1788 netdev->features = features;
1789
1790 if (netif_running(netdev))
1791 igb_reinit_locked(adapter);
1792 else
1793 igb_reset(adapter);
1794
ac52caa3
MM
1795 return 0;
1796}
1797
2e5c6922 1798static const struct net_device_ops igb_netdev_ops = {
559e9c49 1799 .ndo_open = igb_open,
2e5c6922 1800 .ndo_stop = igb_close,
cd392f5c 1801 .ndo_start_xmit = igb_xmit_frame,
12dcd86b 1802 .ndo_get_stats64 = igb_get_stats64,
ff41f8dc 1803 .ndo_set_rx_mode = igb_set_rx_mode,
2e5c6922
SH
1804 .ndo_set_mac_address = igb_set_mac,
1805 .ndo_change_mtu = igb_change_mtu,
1806 .ndo_do_ioctl = igb_ioctl,
1807 .ndo_tx_timeout = igb_tx_timeout,
1808 .ndo_validate_addr = eth_validate_addr,
2e5c6922
SH
1809 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1810 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
8151d294
WM
1811 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1812 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1813 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1814 .ndo_get_vf_config = igb_ndo_get_vf_config,
2e5c6922
SH
1815#ifdef CONFIG_NET_POLL_CONTROLLER
1816 .ndo_poll_controller = igb_netpoll,
1817#endif
b2cb09b1
JP
1818 .ndo_fix_features = igb_fix_features,
1819 .ndo_set_features = igb_set_features,
2e5c6922
SH
1820};
1821
d67974f0
CW
1822/**
1823 * igb_set_fw_version - Configure version string for ethtool
1824 * @adapter: adapter struct
1825 *
1826 **/
1827void igb_set_fw_version(struct igb_adapter *adapter)
1828{
1829 struct e1000_hw *hw = &adapter->hw;
1830 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
1831 u16 major, build, patch, fw_version;
1832 u32 etrack_id;
1833
1834 hw->nvm.ops.read(hw, 5, 1, &fw_version);
1835 if (adapter->hw.mac.type != e1000_i211) {
1836 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
1837 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
1838 etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
1839
1840 /* combo image version needs to be found */
1841 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
1842 if ((comb_offset != 0x0) &&
1843 (comb_offset != IGB_NVM_VER_INVALID)) {
1844 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
1845 + 1), 1, &comb_verh);
1846 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
1847 1, &comb_verl);
1848
1849 /* Only display Option Rom if it exists and is valid */
1850 if ((comb_verh && comb_verl) &&
1851 ((comb_verh != IGB_NVM_VER_INVALID) &&
1852 (comb_verl != IGB_NVM_VER_INVALID))) {
1853 major = comb_verl >> IGB_COMB_VER_SHFT;
1854 build = (comb_verl << IGB_COMB_VER_SHFT) |
1855 (comb_verh >> IGB_COMB_VER_SHFT);
1856 patch = comb_verh & IGB_COMB_VER_MASK;
1857 snprintf(adapter->fw_version,
1858 sizeof(adapter->fw_version),
1859 "%d.%d%d, 0x%08x, %d.%d.%d",
1860 (fw_version & IGB_MAJOR_MASK) >>
1861 IGB_MAJOR_SHIFT,
1862 (fw_version & IGB_MINOR_MASK) >>
1863 IGB_MINOR_SHIFT,
1864 (fw_version & IGB_BUILD_MASK),
1865 etrack_id, major, build, patch);
1866 goto out;
1867 }
1868 }
1869 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1870 "%d.%d%d, 0x%08x",
1871 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1872 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1873 (fw_version & IGB_BUILD_MASK), etrack_id);
1874 } else {
1875 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1876 "%d.%d%d",
1877 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1878 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1879 (fw_version & IGB_BUILD_MASK));
1880 }
1881out:
1882 return;
1883}
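/* Illustrative output only (hypothetical NVM contents, not from the original
 * source): with a valid option-ROM combo image the version string takes the
 * form "1.63, 0x800009fa, 1.3.0" (EEPROM major.minor+build, eTrack ID,
 * option-ROM major.build.patch); without a combo image only the first two
 * fields are printed, and on i211 parts only the "major.minorbuild" form
 * (e.g. "1.63") is reported.
 */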
1884
1885/**
1886 * igb_probe - Device Initialization Routine
1887 * @pdev: PCI device information struct
1888 * @ent: entry in igb_pci_tbl
1889 *
1890 * Returns 0 on success, negative on failure
1891 *
1892 * igb_probe initializes an adapter identified by a pci_dev structure.
1893 * The OS initialization, configuring of the adapter private structure,
1894 * and a hardware reset occur.
1895 **/
1896static int __devinit igb_probe(struct pci_dev *pdev,
1897 const struct pci_device_id *ent)
1898{
1899 struct net_device *netdev;
1900 struct igb_adapter *adapter;
1901 struct e1000_hw *hw;
4337e993 1902 u16 eeprom_data = 0;
9835fd73 1903 s32 ret_val;
4337e993 1904 static int global_quad_port_a; /* global quad port a indication */
9d5c8243
AK
1905 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1906 unsigned long mmio_start, mmio_len;
2d6a5e95 1907 int err, pci_using_dac;
9d5c8243 1908 u16 eeprom_apme_mask = IGB_EEPROM_APME;
9835fd73 1909 u8 part_str[E1000_PBANUM_LENGTH];
9d5c8243 1910
bded64a7
AG
1911 /* Catch broken hardware that put the wrong VF device ID in
1912 * the PCIe SR-IOV capability.
1913 */
1914 if (pdev->is_virtfn) {
1915 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
f96a8a0b 1916 pci_name(pdev), pdev->vendor, pdev->device);
bded64a7
AG
1917 return -EINVAL;
1918 }
1919
aed5dec3 1920 err = pci_enable_device_mem(pdev);
9d5c8243
AK
1921 if (err)
1922 return err;
1923
1924 pci_using_dac = 0;
59d71989 1925 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
9d5c8243 1926 if (!err) {
59d71989 1927 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
9d5c8243
AK
1928 if (!err)
1929 pci_using_dac = 1;
1930 } else {
59d71989 1931 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
9d5c8243 1932 if (err) {
59d71989 1933 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
9d5c8243
AK
1934 if (err) {
1935 dev_err(&pdev->dev, "No usable DMA "
1936 "configuration, aborting\n");
1937 goto err_dma;
1938 }
1939 }
1940 }
1941
aed5dec3
AD
1942 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1943 IORESOURCE_MEM),
1944 igb_driver_name);
9d5c8243
AK
1945 if (err)
1946 goto err_pci_reg;
1947
19d5afd4 1948 pci_enable_pcie_error_reporting(pdev);
40a914fa 1949
9d5c8243 1950 pci_set_master(pdev);
c682fc23 1951 pci_save_state(pdev);
9d5c8243
AK
1952
1953 err = -ENOMEM;
1bfaf07b 1954 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1cc3bd87 1955 IGB_MAX_TX_QUEUES);
9d5c8243
AK
1956 if (!netdev)
1957 goto err_alloc_etherdev;
1958
1959 SET_NETDEV_DEV(netdev, &pdev->dev);
1960
1961 pci_set_drvdata(pdev, netdev);
1962 adapter = netdev_priv(netdev);
1963 adapter->netdev = netdev;
1964 adapter->pdev = pdev;
1965 hw = &adapter->hw;
1966 hw->back = adapter;
b3f4d599 1967 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
9d5c8243
AK
1968
1969 mmio_start = pci_resource_start(pdev, 0);
1970 mmio_len = pci_resource_len(pdev, 0);
1971
1972 err = -EIO;
28b0759c
AD
1973 hw->hw_addr = ioremap(mmio_start, mmio_len);
1974 if (!hw->hw_addr)
9d5c8243
AK
1975 goto err_ioremap;
1976
2e5c6922 1977 netdev->netdev_ops = &igb_netdev_ops;
9d5c8243 1978 igb_set_ethtool_ops(netdev);
9d5c8243 1979 netdev->watchdog_timeo = 5 * HZ;
9d5c8243
AK
1980
1981 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1982
1983 netdev->mem_start = mmio_start;
1984 netdev->mem_end = mmio_start + mmio_len;
1985
9d5c8243
AK
1986 /* PCI config space info */
1987 hw->vendor_id = pdev->vendor;
1988 hw->device_id = pdev->device;
1989 hw->revision_id = pdev->revision;
1990 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1991 hw->subsystem_device_id = pdev->subsystem_device;
1992
9d5c8243
AK
1993 /* Copy the default MAC, PHY and NVM function pointers */
1994 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1995 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1996 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1997 /* Initialize skew-specific constants */
1998 err = ei->get_invariants(hw);
1999 if (err)
450c87c8 2000 goto err_sw_init;
9d5c8243 2001
450c87c8 2002 /* setup the private structure */
9d5c8243
AK
2003 err = igb_sw_init(adapter);
2004 if (err)
2005 goto err_sw_init;
2006
2007 igb_get_bus_info_pcie(hw);
2008
2009 hw->phy.autoneg_wait_to_complete = false;
9d5c8243
AK
2010
2011 /* Copper options */
2012 if (hw->phy.media_type == e1000_media_type_copper) {
2013 hw->phy.mdix = AUTO_ALL_MODES;
2014 hw->phy.disable_polarity_correction = false;
2015 hw->phy.ms_type = e1000_ms_hw_default;
2016 }
2017
2018 if (igb_check_reset_block(hw))
2019 dev_info(&pdev->dev,
2020 "PHY reset is blocked due to SOL/IDER session.\n");
2021
077887c3
AD
2022 /*
2023 * features is initialized to 0 in allocation, it might have bits
2024 * set by igb_sw_init so we should use an or instead of an
2025 * assignment.
2026 */
2027 netdev->features |= NETIF_F_SG |
2028 NETIF_F_IP_CSUM |
2029 NETIF_F_IPV6_CSUM |
2030 NETIF_F_TSO |
2031 NETIF_F_TSO6 |
2032 NETIF_F_RXHASH |
2033 NETIF_F_RXCSUM |
2034 NETIF_F_HW_VLAN_RX |
2035 NETIF_F_HW_VLAN_TX;
2036
2037 /* copy netdev features into list of user selectable features */
2038 netdev->hw_features |= netdev->features;
89eaefb6 2039 netdev->hw_features |= NETIF_F_RXALL;
077887c3
AD
2040
2041 /* set this bit last since it cannot be part of hw_features */
2042 netdev->features |= NETIF_F_HW_VLAN_FILTER;
2043
2044 netdev->vlan_features |= NETIF_F_TSO |
2045 NETIF_F_TSO6 |
2046 NETIF_F_IP_CSUM |
2047 NETIF_F_IPV6_CSUM |
2048 NETIF_F_SG;
48f29ffc 2049
6b8f0922
BG
2050 netdev->priv_flags |= IFF_SUPP_NOFCS;
2051
7b872a55 2052 if (pci_using_dac) {
9d5c8243 2053 netdev->features |= NETIF_F_HIGHDMA;
7b872a55
YZ
2054 netdev->vlan_features |= NETIF_F_HIGHDMA;
2055 }
9d5c8243 2056
ac52caa3
MM
2057 if (hw->mac.type >= e1000_82576) {
2058 netdev->hw_features |= NETIF_F_SCTP_CSUM;
b9473560 2059 netdev->features |= NETIF_F_SCTP_CSUM;
ac52caa3 2060 }
b9473560 2061
01789349
JP
2062 netdev->priv_flags |= IFF_UNICAST_FLT;
2063
330a6d6a 2064 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
9d5c8243
AK
2065
2066 /* before reading the NVM, reset the controller to put the device in a
2067 * known good starting state */
2068 hw->mac.ops.reset_hw(hw);
2069
	/*
	 * make sure the NVM is good; i211 parts have special NVM that
	 * doesn't contain a checksum
	 */
2074 if (hw->mac.type != e1000_i211) {
2075 if (hw->nvm.ops.validate(hw) < 0) {
2076 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2077 err = -EIO;
2078 goto err_eeprom;
2079 }
2080 }
2081
2082 /* copy the MAC address out of the NVM */
2083 if (hw->mac.ops.read_mac_addr(hw))
2084 dev_err(&pdev->dev, "NVM Read Error\n");
2085
2086 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2087 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
2088
2089 if (!is_valid_ether_addr(netdev->perm_addr)) {
2090 dev_err(&pdev->dev, "Invalid MAC Address\n");
2091 err = -EIO;
2092 goto err_eeprom;
2093 }
2094
d67974f0
CW
2095 /* get firmware version for ethtool -i */
2096 igb_set_fw_version(adapter);
2097
c061b18d 2098 setup_timer(&adapter->watchdog_timer, igb_watchdog,
0e340485 2099 (unsigned long) adapter);
c061b18d 2100 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
0e340485 2101 (unsigned long) adapter);
9d5c8243
AK
2102
2103 INIT_WORK(&adapter->reset_task, igb_reset_task);
2104 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2105
450c87c8 2106 /* Initialize link properties that are user-changeable */
9d5c8243
AK
2107 adapter->fc_autoneg = true;
2108 hw->mac.autoneg = true;
2109 hw->phy.autoneg_advertised = 0x2f;
2110
0cce119a
AD
2111 hw->fc.requested_mode = e1000_fc_default;
2112 hw->fc.current_mode = e1000_fc_default;
9d5c8243 2113
9d5c8243
AK
2114 igb_validate_mdi_setting(hw);
2115
9d5c8243
AK
2116 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
2117 * enable the ACPI Magic Packet filter
2118 */
2119
a2cf8b6c 2120 if (hw->bus.func == 0)
312c75ae 2121 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
6d337dce 2122 else if (hw->mac.type >= e1000_82580)
55cac248
AD
2123 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2124 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2125 &eeprom_data);
a2cf8b6c
AD
2126 else if (hw->bus.func == 1)
2127 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
9d5c8243
AK
2128
2129 if (eeprom_data & eeprom_apme_mask)
2130 adapter->eeprom_wol |= E1000_WUFC_MAG;
2131
2132 /* now that we have the eeprom settings, apply the special cases where
2133 * the eeprom may be wrong or the board simply won't support wake on
2134 * lan on a particular port */
2135 switch (pdev->device) {
2136 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2137 adapter->eeprom_wol = 0;
2138 break;
2139 case E1000_DEV_ID_82575EB_FIBER_SERDES:
2d064c06
AD
2140 case E1000_DEV_ID_82576_FIBER:
2141 case E1000_DEV_ID_82576_SERDES:
9d5c8243
AK
2142 /* Wake events only supported on port A for dual fiber
2143 * regardless of eeprom setting */
2144 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2145 adapter->eeprom_wol = 0;
2146 break;
c8ea5ea9 2147 case E1000_DEV_ID_82576_QUAD_COPPER:
d5aa2252 2148 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
c8ea5ea9
AD
2149 /* if quad port adapter, disable WoL on all but port A */
2150 if (global_quad_port_a != 0)
2151 adapter->eeprom_wol = 0;
2152 else
2153 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2154 /* Reset for multiple quad port adapters */
2155 if (++global_quad_port_a == 4)
2156 global_quad_port_a = 0;
2157 break;
9d5c8243
AK
2158 }
2159
2160 /* initialize the wol settings based on the eeprom settings */
2161 adapter->wol = adapter->eeprom_wol;
e1b86d84 2162 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
9d5c8243
AK
2163
2164 /* reset the hardware with the new settings */
2165 igb_reset(adapter);
2166
2167 /* let the f/w know that the h/w is now under the control of the
2168 * driver. */
2169 igb_get_hw_control(adapter);
2170
9d5c8243
AK
2171 strcpy(netdev->name, "eth%d");
2172 err = register_netdev(netdev);
2173 if (err)
2174 goto err_register;
2175
b168dfc5
JB
2176 /* carrier off reporting is important to ethtool even BEFORE open */
2177 netif_carrier_off(netdev);
2178
421e02f0 2179#ifdef CONFIG_IGB_DCA
bbd98fe4 2180 if (dca_add_requester(&pdev->dev) == 0) {
7dfc16fa 2181 adapter->flags |= IGB_FLAG_DCA_ENABLED;
fe4506b6 2182 dev_info(&pdev->dev, "DCA enabled\n");
fe4506b6
JC
2183 igb_setup_dca(adapter);
2184 }
fe4506b6 2185
38c845c7 2186#endif
3c89f6d0 2187
7ebae817 2188#ifdef CONFIG_IGB_PTP
673b8b70 2189 /* do hw tstamp init after resetting */
7ebae817 2190 igb_ptp_init(adapter);
3c89f6d0 2191#endif /* CONFIG_IGB_PTP */
673b8b70 2192
9d5c8243
AK
2193 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2194 /* print bus type/speed/width info */
7c510e4b 2195 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
9d5c8243 2196 netdev->name,
559e9c49 2197 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
ff846f52 2198 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
559e9c49 2199 "unknown"),
59c3de89
AD
2200 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2201 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2202 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2203 "unknown"),
7c510e4b 2204 netdev->dev_addr);
9d5c8243 2205
9835fd73
CW
2206 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2207 if (ret_val)
2208 strcpy(part_str, "Unknown");
2209 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
9d5c8243
AK
2210 dev_info(&pdev->dev,
2211 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2212 adapter->msix_entries ? "MSI-X" :
7dfc16fa 2213 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
9d5c8243 2214 adapter->num_rx_queues, adapter->num_tx_queues);
09b068d4
CW
2215 switch (hw->mac.type) {
2216 case e1000_i350:
f96a8a0b
CW
2217 case e1000_i210:
2218 case e1000_i211:
09b068d4
CW
2219 igb_set_eee_i350(hw);
2220 break;
2221 default:
2222 break;
2223 }
749ab2cd
YZ
2224
2225 pm_runtime_put_noidle(&pdev->dev);
9d5c8243
AK
2226 return 0;
2227
2228err_register:
2229 igb_release_hw_control(adapter);
2230err_eeprom:
2231 if (!igb_check_reset_block(hw))
f5f4cf08 2232 igb_reset_phy(hw);
9d5c8243
AK
2233
2234 if (hw->flash_address)
2235 iounmap(hw->flash_address);
9d5c8243 2236err_sw_init:
047e0030 2237 igb_clear_interrupt_scheme(adapter);
9d5c8243
AK
2238 iounmap(hw->hw_addr);
2239err_ioremap:
2240 free_netdev(netdev);
2241err_alloc_etherdev:
559e9c49
AD
2242 pci_release_selected_regions(pdev,
2243 pci_select_bars(pdev, IORESOURCE_MEM));
2244err_pci_reg:
2245err_dma:
2246 pci_disable_device(pdev);
2247 return err;
2248}
2249
2250/**
2251 * igb_remove - Device Removal Routine
2252 * @pdev: PCI device information struct
2253 *
2254 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
2256 * Hot-Plug event, or because the driver is going to be removed from
2257 * memory.
2258 **/
2259static void __devexit igb_remove(struct pci_dev *pdev)
2260{
2261 struct net_device *netdev = pci_get_drvdata(pdev);
2262 struct igb_adapter *adapter = netdev_priv(netdev);
fe4506b6 2263 struct e1000_hw *hw = &adapter->hw;
9d5c8243 2264
749ab2cd 2265 pm_runtime_get_noresume(&pdev->dev);
7ebae817 2266#ifdef CONFIG_IGB_PTP
a79f4f88 2267 igb_ptp_stop(adapter);
3c89f6d0 2268#endif /* CONFIG_IGB_PTP */
749ab2cd 2269
760141a5
TH
2270 /*
2271 * The watchdog timer may be rescheduled, so explicitly
2272 * disable watchdog from being rescheduled.
2273 */
9d5c8243
AK
2274 set_bit(__IGB_DOWN, &adapter->state);
2275 del_timer_sync(&adapter->watchdog_timer);
2276 del_timer_sync(&adapter->phy_info_timer);
2277
760141a5
TH
2278 cancel_work_sync(&adapter->reset_task);
2279 cancel_work_sync(&adapter->watchdog_task);
9d5c8243 2280
421e02f0 2281#ifdef CONFIG_IGB_DCA
7dfc16fa 2282 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
fe4506b6
JC
2283 dev_info(&pdev->dev, "DCA disabled\n");
2284 dca_remove_requester(&pdev->dev);
7dfc16fa 2285 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
cbd347ad 2286 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
fe4506b6
JC
2287 }
2288#endif
2289
9d5c8243
AK
2290 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2291 * would have already happened in close and is redundant. */
2292 igb_release_hw_control(adapter);
2293
2294 unregister_netdev(netdev);
2295
047e0030 2296 igb_clear_interrupt_scheme(adapter);
9d5c8243 2297
37680117
AD
2298#ifdef CONFIG_PCI_IOV
2299 /* reclaim resources allocated to VFs */
2300 if (adapter->vf_data) {
2301 /* disable iov and allow time for transactions to clear */
f557147c
SA
2302 if (igb_vfs_are_assigned(adapter)) {
2303 dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2304 } else {
0224d663
GR
2305 pci_disable_sriov(pdev);
2306 msleep(500);
0224d663 2307 }
37680117
AD
2308
2309 kfree(adapter->vf_data);
2310 adapter->vf_data = NULL;
2311 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
945a5151 2312 wrfl();
37680117
AD
2313 msleep(100);
2314 dev_info(&pdev->dev, "IOV Disabled\n");
2315 }
2316#endif
559e9c49 2317
28b0759c
AD
2318 iounmap(hw->hw_addr);
2319 if (hw->flash_address)
2320 iounmap(hw->flash_address);
559e9c49
AD
2321 pci_release_selected_regions(pdev,
2322 pci_select_bars(pdev, IORESOURCE_MEM));
9d5c8243 2323
1128c756 2324 kfree(adapter->shadow_vfta);
9d5c8243
AK
2325 free_netdev(netdev);
2326
19d5afd4 2327 pci_disable_pcie_error_reporting(pdev);
40a914fa 2328
9d5c8243
AK
2329 pci_disable_device(pdev);
2330}
2331
2332/**
2333 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2334 * @adapter: board private structure to initialize
2335 *
2336 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs.  The reason for this ordering is that it is much
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2339 * the memory for the VFs.
2340 **/
2341static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2342{
2343#ifdef CONFIG_PCI_IOV
2344 struct pci_dev *pdev = adapter->pdev;
f96a8a0b 2345 struct e1000_hw *hw = &adapter->hw;
f557147c 2346 int old_vfs = pci_num_vf(adapter->pdev);
0224d663 2347 int i;
a6b623e0 2348
2349 /* Virtualization features not supported on i210 family. */
2350 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2351 return;
2352
0224d663
GR
2353 if (old_vfs) {
2354 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2355 "max_vfs setting of %d\n", old_vfs, max_vfs);
2356 adapter->vfs_allocated_count = old_vfs;
a6b623e0
AD
2357 }
2358
0224d663
GR
2359 if (!adapter->vfs_allocated_count)
2360 return;
2361
2362 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2363 sizeof(struct vf_data_storage), GFP_KERNEL);
f96a8a0b 2364
0224d663
GR
2365 /* if allocation failed then we do not support SR-IOV */
2366 if (!adapter->vf_data) {
a6b623e0 2367 adapter->vfs_allocated_count = 0;
0224d663
GR
2368 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2369 "Data Storage\n");
2370 goto out;
a6b623e0 2371 }
0224d663
GR
2372
2373 if (!old_vfs) {
2374 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2375 goto err_out;
2376 }
2377 dev_info(&pdev->dev, "%d VFs allocated\n",
2378 adapter->vfs_allocated_count);
2379 for (i = 0; i < adapter->vfs_allocated_count; i++)
2380 igb_vf_configure(adapter, i);
2381
2382 /* DMA Coalescing is not supported in IOV mode. */
2383 adapter->flags &= ~IGB_FLAG_DMAC;
2384 goto out;
2385err_out:
2386 kfree(adapter->vf_data);
2387 adapter->vf_data = NULL;
2388 adapter->vfs_allocated_count = 0;
2389out:
2390 return;
a6b623e0
AD
2391#endif /* CONFIG_PCI_IOV */
2392}
2393
2394/**
2395 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2396 * @adapter: board private structure to initialize
2397 *
2398 * igb_sw_init initializes the Adapter private data structure.
2399 * Fields are initialized based on PCI device information and
2400 * OS network device settings (MTU size).
2401 **/
2402static int __devinit igb_sw_init(struct igb_adapter *adapter)
2403{
2404 struct e1000_hw *hw = &adapter->hw;
2405 struct net_device *netdev = adapter->netdev;
2406 struct pci_dev *pdev = adapter->pdev;
374a542d 2407 u32 max_rss_queues;
9d5c8243
AK
2408
2409 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2410
13fde97a 2411 /* set default ring sizes */
68fd9910
AD
2412 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2413 adapter->rx_ring_count = IGB_DEFAULT_RXD;
13fde97a
AD
2414
2415 /* set default ITR values */
4fc82adf
AD
2416 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2417 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2418
13fde97a
AD
2419 /* set default work limits */
2420 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2421
153285f9
AD
2422 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2423 VLAN_HLEN;
9d5c8243
AK
2424 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2425
81c2fc22
AD
2426 adapter->node = -1;
2427
12dcd86b 2428 spin_lock_init(&adapter->stats64_lock);
a6b623e0 2429#ifdef CONFIG_PCI_IOV
6b78bb1d
CW
2430 switch (hw->mac.type) {
2431 case e1000_82576:
2432 case e1000_i350:
9b082d73
SA
2433 if (max_vfs > 7) {
2434 dev_warn(&pdev->dev,
2435 "Maximum of 7 VFs per PF, using max\n");
2436 adapter->vfs_allocated_count = 7;
2437 } else
2438 adapter->vfs_allocated_count = max_vfs;
6b78bb1d
CW
2439 break;
2440 default:
2441 break;
2442 }
a6b623e0 2443#endif /* CONFIG_PCI_IOV */
2444
2445 /* Determine the maximum number of RSS queues supported. */
f96a8a0b 2446 switch (hw->mac.type) {
374a542d
MV
2447 case e1000_i211:
2448 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
2449 break;
2450 case e1000_82575:
f96a8a0b 2451 case e1000_i210:
374a542d
MV
2452 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2453 break;
2454 case e1000_i350:
2455 /* I350 cannot do RSS and SR-IOV at the same time */
2456 if (!!adapter->vfs_allocated_count) {
2457 max_rss_queues = 1;
2458 break;
2459 }
2460 /* fall through */
2461 case e1000_82576:
2462 if (!!adapter->vfs_allocated_count) {
2463 max_rss_queues = 2;
2464 break;
2465 }
2466 /* fall through */
2467 case e1000_82580:
2468 default:
2469 max_rss_queues = IGB_MAX_RX_QUEUES;
f96a8a0b 2470 break;
374a542d
MV
2471 }
2472
2473 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2474
2475 /* Determine if we need to pair queues. */
2476 switch (hw->mac.type) {
2477 case e1000_82575:
f96a8a0b 2478 case e1000_i211:
374a542d 2479 /* Device supports enough interrupts without queue pairing. */
f96a8a0b 2480 break;
374a542d
MV
2481 case e1000_82576:
2482 /*
2483 * If VFs are going to be allocated with RSS queues then we
2484 * should pair the queues in order to conserve interrupts due
2485 * to limited supply.
2486 */
2487 if ((adapter->rss_queues > 1) &&
2488 (adapter->vfs_allocated_count > 6))
2489 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2490 /* fall through */
2491 case e1000_82580:
2492 case e1000_i350:
2493 case e1000_i210:
f96a8a0b 2494 default:
374a542d
MV
2495 /*
2496 * If rss_queues > half of max_rss_queues, pair the queues in
2497 * order to conserve interrupts due to limited supply.
2498 */
2499 if (adapter->rss_queues > (max_rss_queues / 2))
2500 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
f96a8a0b
CW
2501 break;
2502 }
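	/* Worked example (illustrative; assumes IGB_MAX_RX_QUEUES is 8 and
	 * applies to the default branch above): on an 82580 with 8 online
	 * CPUs, rss_queues = min(8, 8) = 8, which is greater than 8 / 2, so
	 * IGB_FLAG_QUEUE_PAIRS is set and each Tx/Rx ring pair shares one
	 * MSI-X vector.  With only 4 online CPUs, rss_queues = 4 is not
	 * greater than 4, so Tx and Rx rings keep separate vectors.
	 */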
a99955fc 2503
1128c756
CW
2504 /* Setup and initialize a copy of the hw vlan table array */
2505 adapter->shadow_vfta = kzalloc(sizeof(u32) *
2506 E1000_VLAN_FILTER_TBL_SIZE,
2507 GFP_ATOMIC);
2508
a6b623e0 2509 /* This call may decrease the number of queues */
047e0030 2510 if (igb_init_interrupt_scheme(adapter)) {
9d5c8243
AK
2511 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2512 return -ENOMEM;
2513 }
2514
a6b623e0
AD
2515 igb_probe_vfs(adapter);
2516
9d5c8243
AK
2517 /* Explicitly disable IRQ since the NIC can be in any state. */
2518 igb_irq_disable(adapter);
2519
f96a8a0b 2520 if (hw->mac.type >= e1000_i350)
831ec0b4
CW
2521 adapter->flags &= ~IGB_FLAG_DMAC;
2522
9d5c8243
AK
2523 set_bit(__IGB_DOWN, &adapter->state);
2524 return 0;
2525}
2526
2527/**
2528 * igb_open - Called when a network interface is made active
2529 * @netdev: network interface device structure
2530 *
2531 * Returns 0 on success, negative value on failure
2532 *
2533 * The open entry point is called when a network interface is made
2534 * active by the system (IFF_UP). At this point all resources needed
2535 * for transmit and receive operations are allocated, the interrupt
2536 * handler is registered with the OS, the watchdog timer is started,
2537 * and the stack is notified that the interface is ready.
2538 **/
749ab2cd 2539static int __igb_open(struct net_device *netdev, bool resuming)
2540{
2541 struct igb_adapter *adapter = netdev_priv(netdev);
2542 struct e1000_hw *hw = &adapter->hw;
749ab2cd 2543 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2544 int err;
2545 int i;
2546
2547 /* disallow open during test */
749ab2cd
YZ
2548 if (test_bit(__IGB_TESTING, &adapter->state)) {
2549 WARN_ON(resuming);
9d5c8243 2550 return -EBUSY;
749ab2cd
YZ
2551 }
2552
2553 if (!resuming)
2554 pm_runtime_get_sync(&pdev->dev);
9d5c8243 2555
b168dfc5
JB
2556 netif_carrier_off(netdev);
2557
9d5c8243
AK
2558 /* allocate transmit descriptors */
2559 err = igb_setup_all_tx_resources(adapter);
2560 if (err)
2561 goto err_setup_tx;
2562
2563 /* allocate receive descriptors */
2564 err = igb_setup_all_rx_resources(adapter);
2565 if (err)
2566 goto err_setup_rx;
2567
88a268c1 2568 igb_power_up_link(adapter);
9d5c8243 2569
9d5c8243
AK
2570 /* before we allocate an interrupt, we must be ready to handle it.
2571 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2572 * as soon as we call pci_request_irq, so we have to setup our
2573 * clean_rx handler before we do so. */
2574 igb_configure(adapter);
2575
2576 err = igb_request_irq(adapter);
2577 if (err)
2578 goto err_req_irq;
2579
2580 /* From here on the code is the same as igb_up() */
2581 clear_bit(__IGB_DOWN, &adapter->state);
2582
0d1ae7f4
AD
2583 for (i = 0; i < adapter->num_q_vectors; i++)
2584 napi_enable(&(adapter->q_vector[i]->napi));
9d5c8243
AK
2585
2586 /* Clear any pending interrupts. */
2587 rd32(E1000_ICR);
844290e5
PW
2588
2589 igb_irq_enable(adapter);
2590
d4960307
AD
2591 /* notify VFs that reset has been completed */
2592 if (adapter->vfs_allocated_count) {
2593 u32 reg_data = rd32(E1000_CTRL_EXT);
2594 reg_data |= E1000_CTRL_EXT_PFRSTD;
2595 wr32(E1000_CTRL_EXT, reg_data);
2596 }
2597
d55b53ff
JK
2598 netif_tx_start_all_queues(netdev);
2599
749ab2cd
YZ
2600 if (!resuming)
2601 pm_runtime_put(&pdev->dev);
2602
25568a53
AD
2603 /* start the watchdog. */
2604 hw->mac.get_link_status = 1;
2605 schedule_work(&adapter->watchdog_task);
9d5c8243
AK
2606
2607 return 0;
2608
2609err_req_irq:
2610 igb_release_hw_control(adapter);
88a268c1 2611 igb_power_down_link(adapter);
9d5c8243
AK
2612 igb_free_all_rx_resources(adapter);
2613err_setup_rx:
2614 igb_free_all_tx_resources(adapter);
2615err_setup_tx:
2616 igb_reset(adapter);
749ab2cd
YZ
2617 if (!resuming)
2618 pm_runtime_put(&pdev->dev);
9d5c8243
AK
2619
2620 return err;
2621}
2622
749ab2cd
YZ
2623static int igb_open(struct net_device *netdev)
2624{
2625 return __igb_open(netdev, false);
2626}
2627
9d5c8243
AK
2628/**
2629 * igb_close - Disables a network interface
2630 * @netdev: network interface device structure
2631 *
2632 * Returns 0, this is not allowed to fail
2633 *
2634 * The close entry point is called when an interface is de-activated
2635 * by the OS. The hardware is still under the driver's control, but
2636 * needs to be disabled. A global MAC reset is issued to stop the
2637 * hardware, and all transmit and receive resources are freed.
2638 **/
749ab2cd 2639static int __igb_close(struct net_device *netdev, bool suspending)
9d5c8243
AK
2640{
2641 struct igb_adapter *adapter = netdev_priv(netdev);
749ab2cd 2642 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2643
2644 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
9d5c8243 2645
749ab2cd
YZ
2646 if (!suspending)
2647 pm_runtime_get_sync(&pdev->dev);
2648
2649 igb_down(adapter);
9d5c8243
AK
2650 igb_free_irq(adapter);
2651
2652 igb_free_all_tx_resources(adapter);
2653 igb_free_all_rx_resources(adapter);
2654
749ab2cd
YZ
2655 if (!suspending)
2656 pm_runtime_put_sync(&pdev->dev);
9d5c8243
AK
2657 return 0;
2658}
2659
749ab2cd
YZ
2660static int igb_close(struct net_device *netdev)
2661{
2662 return __igb_close(netdev, false);
2663}
2664
2665/**
2666 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
9d5c8243
AK
2667 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2668 *
2669 * Return 0 on success, negative on failure
2670 **/
80785298 2671int igb_setup_tx_resources(struct igb_ring *tx_ring)
9d5c8243 2672{
59d71989 2673 struct device *dev = tx_ring->dev;
81c2fc22 2674 int orig_node = dev_to_node(dev);
9d5c8243
AK
2675 int size;
2676
06034649 2677 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
81c2fc22
AD
2678 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
2679 if (!tx_ring->tx_buffer_info)
2680 tx_ring->tx_buffer_info = vzalloc(size);
06034649 2681 if (!tx_ring->tx_buffer_info)
9d5c8243 2682 goto err;
9d5c8243
AK
2683
2684 /* round up to nearest 4K */
85e8d004 2685 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
9d5c8243
AK
2686 tx_ring->size = ALIGN(tx_ring->size, 4096);
2687
81c2fc22 2688 set_dev_node(dev, tx_ring->numa_node);
59d71989
AD
2689 tx_ring->desc = dma_alloc_coherent(dev,
2690 tx_ring->size,
2691 &tx_ring->dma,
2692 GFP_KERNEL);
81c2fc22
AD
2693 set_dev_node(dev, orig_node);
2694 if (!tx_ring->desc)
2695 tx_ring->desc = dma_alloc_coherent(dev,
2696 tx_ring->size,
2697 &tx_ring->dma,
2698 GFP_KERNEL);
9d5c8243
AK
2699
2700 if (!tx_ring->desc)
2701 goto err;
2702
9d5c8243
AK
2703 tx_ring->next_to_use = 0;
2704 tx_ring->next_to_clean = 0;
81c2fc22 2705
9d5c8243
AK
2706 return 0;
2707
2708err:
06034649 2709 vfree(tx_ring->tx_buffer_info);
59d71989 2710 dev_err(dev,
9d5c8243
AK
2711 "Unable to allocate memory for the transmit descriptor ring\n");
2712 return -ENOMEM;
2713}
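/* Sizing note (illustrative, not part of the original source): each
 * union e1000_adv_tx_desc is 16 bytes, so assuming the default of 256
 * descriptors the ring needs 256 * 16 = 4096 bytes and the ALIGN() above
 * is a no-op; a hypothetical 320-descriptor ring (5120 bytes) would be
 * rounded up to 8192 bytes, so the descriptor ring always occupies whole
 * 4 KB pages.
 */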
2714
2715/**
2716 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2717 * (Descriptors) for all queues
2718 * @adapter: board private structure
2719 *
2720 * Return 0 on success, negative on failure
2721 **/
2722static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2723{
439705e1 2724 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2725 int i, err = 0;
2726
2727 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 2728 err = igb_setup_tx_resources(adapter->tx_ring[i]);
9d5c8243 2729 if (err) {
439705e1 2730 dev_err(&pdev->dev,
9d5c8243
AK
2731 "Allocation for Tx Queue %u failed\n", i);
2732 for (i--; i >= 0; i--)
3025a446 2733 igb_free_tx_resources(adapter->tx_ring[i]);
9d5c8243
AK
2734 break;
2735 }
2736 }
2737
2738 return err;
2739}
2740
2741/**
85b430b4
AD
2742 * igb_setup_tctl - configure the transmit control registers
2743 * @adapter: Board private structure
9d5c8243 2744 **/
d7ee5b3a 2745void igb_setup_tctl(struct igb_adapter *adapter)
9d5c8243 2746{
9d5c8243
AK
2747 struct e1000_hw *hw = &adapter->hw;
2748 u32 tctl;
9d5c8243 2749
85b430b4
AD
2750 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2751 wr32(E1000_TXDCTL(0), 0);
9d5c8243
AK
2752
2753 /* Program the Transmit Control Register */
9d5c8243
AK
2754 tctl = rd32(E1000_TCTL);
2755 tctl &= ~E1000_TCTL_CT;
2756 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2757 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2758
2759 igb_config_collision_dist(hw);
2760
9d5c8243
AK
2761 /* Enable transmits */
2762 tctl |= E1000_TCTL_EN;
2763
2764 wr32(E1000_TCTL, tctl);
2765}
2766
85b430b4
AD
2767/**
2768 * igb_configure_tx_ring - Configure transmit ring after Reset
2769 * @adapter: board private structure
2770 * @ring: tx ring to configure
2771 *
2772 * Configure a transmit ring after a reset.
2773 **/
d7ee5b3a
AD
2774void igb_configure_tx_ring(struct igb_adapter *adapter,
2775 struct igb_ring *ring)
85b430b4
AD
2776{
2777 struct e1000_hw *hw = &adapter->hw;
a74420e0 2778 u32 txdctl = 0;
85b430b4
AD
2779 u64 tdba = ring->dma;
2780 int reg_idx = ring->reg_idx;
2781
2782 /* disable the queue */
a74420e0 2783 wr32(E1000_TXDCTL(reg_idx), 0);
85b430b4
AD
2784 wrfl();
2785 mdelay(10);
2786
2787 wr32(E1000_TDLEN(reg_idx),
2788 ring->count * sizeof(union e1000_adv_tx_desc));
2789 wr32(E1000_TDBAL(reg_idx),
2790 tdba & 0x00000000ffffffffULL);
2791 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2792
fce99e34 2793 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
a74420e0 2794 wr32(E1000_TDH(reg_idx), 0);
fce99e34 2795 writel(0, ring->tail);
85b430b4
AD
2796
2797 txdctl |= IGB_TX_PTHRESH;
2798 txdctl |= IGB_TX_HTHRESH << 8;
2799 txdctl |= IGB_TX_WTHRESH << 16;
2800
2801 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2802 wr32(E1000_TXDCTL(reg_idx), txdctl);
2803}
2804
2805/**
2806 * igb_configure_tx - Configure transmit Unit after Reset
2807 * @adapter: board private structure
2808 *
2809 * Configure the Tx unit of the MAC after a reset.
2810 **/
2811static void igb_configure_tx(struct igb_adapter *adapter)
2812{
2813 int i;
2814
2815 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 2816 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
85b430b4
AD
2817}
2818
9d5c8243
AK
2819/**
2820 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
9d5c8243
AK
2821 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2822 *
2823 * Returns 0 on success, negative on failure
2824 **/
80785298 2825int igb_setup_rx_resources(struct igb_ring *rx_ring)
9d5c8243 2826{
59d71989 2827 struct device *dev = rx_ring->dev;
81c2fc22 2828 int orig_node = dev_to_node(dev);
9d5c8243
AK
2829 int size, desc_len;
2830
06034649 2831 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
81c2fc22
AD
2832 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
2833 if (!rx_ring->rx_buffer_info)
2834 rx_ring->rx_buffer_info = vzalloc(size);
06034649 2835 if (!rx_ring->rx_buffer_info)
9d5c8243 2836 goto err;
9d5c8243
AK
2837
2838 desc_len = sizeof(union e1000_adv_rx_desc);
2839
2840 /* Round up to nearest 4K */
2841 rx_ring->size = rx_ring->count * desc_len;
2842 rx_ring->size = ALIGN(rx_ring->size, 4096);
2843
81c2fc22 2844 set_dev_node(dev, rx_ring->numa_node);
59d71989
AD
2845 rx_ring->desc = dma_alloc_coherent(dev,
2846 rx_ring->size,
2847 &rx_ring->dma,
2848 GFP_KERNEL);
81c2fc22
AD
2849 set_dev_node(dev, orig_node);
2850 if (!rx_ring->desc)
2851 rx_ring->desc = dma_alloc_coherent(dev,
2852 rx_ring->size,
2853 &rx_ring->dma,
2854 GFP_KERNEL);
9d5c8243
AK
2855
2856 if (!rx_ring->desc)
2857 goto err;
2858
2859 rx_ring->next_to_clean = 0;
2860 rx_ring->next_to_use = 0;
9d5c8243 2861
9d5c8243
AK
2862 return 0;
2863
2864err:
06034649
AD
2865 vfree(rx_ring->rx_buffer_info);
2866 rx_ring->rx_buffer_info = NULL;
59d71989
AD
2867 dev_err(dev, "Unable to allocate memory for the receive descriptor"
2868 " ring\n");
9d5c8243
AK
2869 return -ENOMEM;
2870}
2871
2872/**
2873 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2874 * (Descriptors) for all queues
2875 * @adapter: board private structure
2876 *
2877 * Return 0 on success, negative on failure
2878 **/
2879static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2880{
439705e1 2881 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2882 int i, err = 0;
2883
2884 for (i = 0; i < adapter->num_rx_queues; i++) {
3025a446 2885 err = igb_setup_rx_resources(adapter->rx_ring[i]);
9d5c8243 2886 if (err) {
439705e1 2887 dev_err(&pdev->dev,
9d5c8243
AK
2888 "Allocation for Rx Queue %u failed\n", i);
2889 for (i--; i >= 0; i--)
3025a446 2890 igb_free_rx_resources(adapter->rx_ring[i]);
9d5c8243
AK
2891 break;
2892 }
2893 }
2894
2895 return err;
2896}
2897
2898/**
2899 * igb_setup_mrqc - configure the multiple receive queue control registers
2900 * @adapter: Board private structure
2901 **/
2902static void igb_setup_mrqc(struct igb_adapter *adapter)
2903{
2904 struct e1000_hw *hw = &adapter->hw;
2905 u32 mrqc, rxcsum;
2906 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2907 union e1000_reta {
2908 u32 dword;
2909 u8 bytes[4];
2910 } reta;
2911 static const u8 rsshash[40] = {
2912 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2913 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2914 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2915 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2916
2917 /* Fill out hash function seeds */
2918 for (j = 0; j < 10; j++) {
2919 u32 rsskey = rsshash[(j * 4)];
2920 rsskey |= rsshash[(j * 4) + 1] << 8;
2921 rsskey |= rsshash[(j * 4) + 2] << 16;
2922 rsskey |= rsshash[(j * 4) + 3] << 24;
2923 array_wr32(E1000_RSSRK(0), j, rsskey);
2924 }
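	/* Example (derived from the rsshash bytes above): for j = 0 the key
	 * bytes 0x6d, 0x5a, 0x56, 0xda are packed least-significant byte
	 * first, so RSSRK(0) word 0 is written as 0xda565a6d; the remaining
	 * nine words are built the same way from the next 36 key bytes.
	 */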
2925
a99955fc 2926 num_rx_queues = adapter->rss_queues;
06cf2666
AD
2927
2928 if (adapter->vfs_allocated_count) {
2929 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2930 switch (hw->mac.type) {
d2ba2ed8 2931 case e1000_i350:
55cac248
AD
2932 case e1000_82580:
2933 num_rx_queues = 1;
2934 shift = 0;
2935 break;
06cf2666
AD
2936 case e1000_82576:
2937 shift = 3;
2938 num_rx_queues = 2;
2939 break;
2940 case e1000_82575:
2941 shift = 2;
2942 shift2 = 6;
2943 default:
2944 break;
2945 }
2946 } else {
2947 if (hw->mac.type == e1000_82575)
2948 shift = 6;
2949 }
2950
2951 for (j = 0; j < (32 * 4); j++) {
2952 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2953 if (shift2)
2954 reta.bytes[j & 3] |= num_rx_queues << shift2;
2955 if ((j & 3) == 3)
2956 wr32(E1000_RETA(j >> 2), reta.dword);
2957 }
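	/* Example (illustrative, not part of the original source): in PF-only
	 * mode on an 82576 with four RSS queues, shift is 0 and the 128-entry
	 * redirection table simply cycles 0, 1, 2, 3, 0, 1, ... so the first
	 * dword written to E1000_RETA(0) carries queue indices {0, 1, 2, 3}.
	 * The low bits of a packet's RSS hash index into this table to pick
	 * its receive queue.
	 */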
2958
2959 /*
2960 * Disable raw packet checksumming so that RSS hash is placed in
2961 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2962 * offloads as they are enabled by default
2963 */
2964 rxcsum = rd32(E1000_RXCSUM);
2965 rxcsum |= E1000_RXCSUM_PCSD;
2966
2967 if (adapter->hw.mac.type >= e1000_82576)
2968 /* Enable Receive Checksum Offload for SCTP */
2969 rxcsum |= E1000_RXCSUM_CRCOFL;
2970
2971 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2972 wr32(E1000_RXCSUM, rxcsum);
f96a8a0b
CW
2973 /*
2974 * Generate RSS hash based on TCP port numbers and/or
2975 * IPv4/v6 src and dst addresses since UDP cannot be
2976 * hashed reliably due to IP fragmentation
2977 */
2978
2979 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
2980 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2981 E1000_MRQC_RSS_FIELD_IPV6 |
2982 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2983 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
06cf2666
AD
2984
2985 /* If VMDq is enabled then we set the appropriate mode for that, else
2986 * we default to RSS so that an RSS hash is calculated per packet even
2987 * if we are only using one queue */
2988 if (adapter->vfs_allocated_count) {
2989 if (hw->mac.type > e1000_82575) {
2990 /* Set the default pool for the PF's first queue */
2991 u32 vtctl = rd32(E1000_VT_CTL);
2992 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2993 E1000_VT_CTL_DISABLE_DEF_POOL);
2994 vtctl |= adapter->vfs_allocated_count <<
2995 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2996 wr32(E1000_VT_CTL, vtctl);
2997 }
a99955fc 2998 if (adapter->rss_queues > 1)
f96a8a0b 2999 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
06cf2666 3000 else
f96a8a0b 3001 mrqc |= E1000_MRQC_ENABLE_VMDQ;
06cf2666 3002 } else {
f96a8a0b
CW
3003 if (hw->mac.type != e1000_i211)
3004 mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
06cf2666
AD
3005 }
3006 igb_vmm_control(adapter);
3007
06cf2666
AD
3008 wr32(E1000_MRQC, mrqc);
3009}
3010
3011/**
3012 * igb_setup_rctl - configure the receive control registers
3013 * @adapter: Board private structure
3014 **/
d7ee5b3a 3015void igb_setup_rctl(struct igb_adapter *adapter)
9d5c8243
AK
3016{
3017 struct e1000_hw *hw = &adapter->hw;
3018 u32 rctl;
9d5c8243
AK
3019
3020 rctl = rd32(E1000_RCTL);
3021
3022 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
69d728ba 3023 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
9d5c8243 3024
69d728ba 3025 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
28b0759c 3026 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
9d5c8243 3027
87cb7e8c
AK
3028 /*
3029 * enable stripping of CRC. It's unlikely this will break BMC
3030 * redirection as it did with e1000. Newer features require
3031 * that the HW strips the CRC.
73cd78f1 3032 */
87cb7e8c 3033 rctl |= E1000_RCTL_SECRC;
9d5c8243 3034
559e9c49 3035 /* disable store bad packets and clear size bits. */
ec54d7d6 3036 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
9d5c8243 3037
6ec43fe6
AD
3038 /* enable LPE to prevent packets larger than max_frame_size */
3039 rctl |= E1000_RCTL_LPE;
9d5c8243 3040
952f72a8
AD
3041 /* disable queue 0 to prevent tail write w/o re-config */
3042 wr32(E1000_RXDCTL(0), 0);
9d5c8243 3043
e1739522
AD
3044 /* Attention!!! For SR-IOV PF driver operations you must enable
3045 * queue drop for all VF and PF queues to prevent head of line blocking
3046 * if an un-trusted VF does not provide descriptors to hardware.
3047 */
3048 if (adapter->vfs_allocated_count) {
e1739522
AD
3049 /* set all queue drop enable bits */
3050 wr32(E1000_QDE, ALL_QUEUES);
e1739522
AD
3051 }
3052
89eaefb6
BG
3053 /* This is useful for sniffing bad packets. */
3054 if (adapter->netdev->features & NETIF_F_RXALL) {
3055 /* UPE and MPE will be handled by normal PROMISC logic
3056 * in e1000e_set_rx_mode */
3057 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3058 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3059 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3060
3061 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3062 E1000_RCTL_DPF | /* Allow filtered pause */
3063 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3064 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3065 * and that breaks VLANs.
3066 */
3067 }
3068
9d5c8243
AK
3069 wr32(E1000_RCTL, rctl);
3070}
3071
7d5753f0
AD
3072static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3073 int vfn)
3074{
3075 struct e1000_hw *hw = &adapter->hw;
3076 u32 vmolr;
3077
3078 /* if it isn't the PF check to see if VFs are enabled and
3079 * increase the size to support vlan tags */
3080 if (vfn < adapter->vfs_allocated_count &&
3081 adapter->vf_data[vfn].vlans_enabled)
3082 size += VLAN_TAG_SIZE;
3083
3084 vmolr = rd32(E1000_VMOLR(vfn));
3085 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3086 vmolr |= size | E1000_VMOLR_LPE;
3087 wr32(E1000_VMOLR(vfn), vmolr);
3088
3089 return 0;
3090}
3091
e1739522
AD
3092/**
3093 * igb_rlpml_set - set maximum receive packet size
3094 * @adapter: board private structure
3095 *
3096 * Configure maximum receivable packet size.
3097 **/
3098static void igb_rlpml_set(struct igb_adapter *adapter)
3099{
153285f9 3100 u32 max_frame_size = adapter->max_frame_size;
e1739522
AD
3101 struct e1000_hw *hw = &adapter->hw;
3102 u16 pf_id = adapter->vfs_allocated_count;
3103
e1739522
AD
3104 if (pf_id) {
3105 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
153285f9
AD
3106 /*
3107 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3108 * to our max jumbo frame size, in case we need to enable
3109 * jumbo frames on one of the rings later.
3110 * This will not pass over-length frames into the default
3111 * queue because it's gated by the VMOLR.RLPML.
3112 */
7d5753f0 3113 max_frame_size = MAX_JUMBO_FRAME_SIZE;
e1739522
AD
3114 }
3115
3116 wr32(E1000_RLPML, max_frame_size);
3117}
3118
8151d294
WM
3119static inline void igb_set_vmolr(struct igb_adapter *adapter,
3120 int vfn, bool aupe)
7d5753f0
AD
3121{
3122 struct e1000_hw *hw = &adapter->hw;
3123 u32 vmolr;
3124
3125 /*
3126 * This register exists only on 82576 and newer so if we are older then
3127 * we should exit and do nothing
3128 */
3129 if (hw->mac.type < e1000_82576)
3130 return;
3131
3132 vmolr = rd32(E1000_VMOLR(vfn));
8151d294
WM
3133 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3134 if (aupe)
3135 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3136 else
3137 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
7d5753f0
AD
3138
3139 /* clear all bits that might not be set */
3140 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3141
a99955fc 3142 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
7d5753f0
AD
3143 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3144 /*
3145 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3146 * multicast packets
3147 */
3148 if (vfn <= adapter->vfs_allocated_count)
3149 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3150
3151 wr32(E1000_VMOLR(vfn), vmolr);
3152}
3153
85b430b4
AD
3154/**
3155 * igb_configure_rx_ring - Configure a receive ring after Reset
3156 * @adapter: board private structure
3157 * @ring: receive ring to be configured
3158 *
3159 * Configure the Rx unit of the MAC after a reset.
3160 **/
d7ee5b3a
AD
3161void igb_configure_rx_ring(struct igb_adapter *adapter,
3162 struct igb_ring *ring)
85b430b4
AD
3163{
3164 struct e1000_hw *hw = &adapter->hw;
3165 u64 rdba = ring->dma;
3166 int reg_idx = ring->reg_idx;
a74420e0 3167 u32 srrctl = 0, rxdctl = 0;
85b430b4
AD
3168
3169 /* disable the queue */
a74420e0 3170 wr32(E1000_RXDCTL(reg_idx), 0);
85b430b4
AD
3171
3172 /* Set DMA base address registers */
3173 wr32(E1000_RDBAL(reg_idx),
3174 rdba & 0x00000000ffffffffULL);
3175 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3176 wr32(E1000_RDLEN(reg_idx),
3177 ring->count * sizeof(union e1000_adv_rx_desc));
3178
3179 /* initialize head and tail */
fce99e34 3180 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
a74420e0 3181 wr32(E1000_RDH(reg_idx), 0);
fce99e34 3182 writel(0, ring->tail);
85b430b4 3183
952f72a8 3184 /* set descriptor configuration */
44390ca6 3185 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
952f72a8 3186#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
44390ca6 3187 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
952f72a8 3188#else
44390ca6 3189 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
952f72a8 3190#endif
44390ca6 3191 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3c89f6d0 3192#ifdef CONFIG_IGB_PTP
06218a8d 3193 if (hw->mac.type >= e1000_82580)
757b77e2 3194 srrctl |= E1000_SRRCTL_TIMESTAMP;
3c89f6d0 3195#endif /* CONFIG_IGB_PTP */
e6bdb6fe
NN
3196 /* Only set Drop Enable if we are supporting multiple queues */
3197 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3198 srrctl |= E1000_SRRCTL_DROP_EN;
952f72a8
AD
3199
3200 wr32(E1000_SRRCTL(reg_idx), srrctl);
3201
7d5753f0 3202 /* set filtering for VMDQ pools */
8151d294 3203 igb_set_vmolr(adapter, reg_idx & 0x7, true);
7d5753f0 3204
85b430b4
AD
3205 rxdctl |= IGB_RX_PTHRESH;
3206 rxdctl |= IGB_RX_HTHRESH << 8;
3207 rxdctl |= IGB_RX_WTHRESH << 16;
a74420e0
AD
3208
3209 /* enable receive descriptor fetching */
3210 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
85b430b4
AD
3211 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3212}
3213
3214/**
3215 * igb_configure_rx - Configure receive Unit after Reset
3216 * @adapter: board private structure
3217 *
3218 * Configure the Rx unit of the MAC after a reset.
3219 **/
3220static void igb_configure_rx(struct igb_adapter *adapter)
3221{
9107584e 3222 int i;
9d5c8243 3223
68d480c4
AD
3224 /* set UTA to appropriate mode */
3225 igb_set_uta(adapter);
3226
26ad9178
AD
3227 /* set the correct pool for the PF default MAC address in entry 0 */
3228 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3229 adapter->vfs_allocated_count);
3230
06cf2666
AD
3231 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3232 * the Base and Length of the Rx Descriptor Ring */
3233 for (i = 0; i < adapter->num_rx_queues; i++)
3025a446 3234 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
9d5c8243
AK
3235}
3236
3237/**
3238 * igb_free_tx_resources - Free Tx Resources per Queue
9d5c8243
AK
3239 * @tx_ring: Tx descriptor ring for a specific queue
3240 *
3241 * Free all transmit software resources
3242 **/
68fd9910 3243void igb_free_tx_resources(struct igb_ring *tx_ring)
9d5c8243 3244{
3b644cf6 3245 igb_clean_tx_ring(tx_ring);
9d5c8243 3246
06034649
AD
3247 vfree(tx_ring->tx_buffer_info);
3248 tx_ring->tx_buffer_info = NULL;
9d5c8243 3249
439705e1
AD
3250 /* if not set, then don't free */
3251 if (!tx_ring->desc)
3252 return;
3253
59d71989
AD
3254 dma_free_coherent(tx_ring->dev, tx_ring->size,
3255 tx_ring->desc, tx_ring->dma);
9d5c8243
AK
3256
3257 tx_ring->desc = NULL;
3258}
3259
3260/**
3261 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3262 * @adapter: board private structure
3263 *
3264 * Free all transmit software resources
3265 **/
3266static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3267{
3268 int i;
3269
3270 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 3271 igb_free_tx_resources(adapter->tx_ring[i]);
9d5c8243
AK
3272}
3273
ebe42d16
AD
3274void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3275 struct igb_tx_buffer *tx_buffer)
3276{
3277 if (tx_buffer->skb) {
3278 dev_kfree_skb_any(tx_buffer->skb);
3279 if (tx_buffer->dma)
3280 dma_unmap_single(ring->dev,
3281 tx_buffer->dma,
3282 tx_buffer->length,
3283 DMA_TO_DEVICE);
3284 } else if (tx_buffer->dma) {
3285 dma_unmap_page(ring->dev,
3286 tx_buffer->dma,
3287 tx_buffer->length,
3288 DMA_TO_DEVICE);
3289 }
3290 tx_buffer->next_to_watch = NULL;
3291 tx_buffer->skb = NULL;
3292 tx_buffer->dma = 0;
3293 /* buffer_info must be completely set up in the transmit path */
9d5c8243
AK
3294}
3295
/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *buffer_info;
	unsigned long size;
	u16 i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->tx_buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
		if (buffer_info->dma) {
			dma_unmap_single(rx_ring->dev,
					 buffer_info->dma,
					 IGB_RX_HDR_LEN,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
		if (buffer_info->page_dma) {
			dma_unmap_page(rx_ring->dev,
				       buffer_info->page_dma,
				       PAGE_SIZE / 2,
				       DMA_FROM_DEVICE);
			buffer_info->page_dma = 0;
		}
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
			buffer_info->page_offset = 0;
		}
	}

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,
			 adapter->vfs_allocated_count);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

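The multicast path above hands the shared code a flat, packed array of 6-byte addresses rather than the netdev_hw_addr list itself. A minimal sketch of that packing step, written as a standalone helper with hypothetical inputs (illustration only, not driver code):

#include <string.h>

#define DEMO_ETH_ALEN 6

/* Pack an array of MAC addresses into one contiguous buffer, the layout
 * igb_update_mc_addr_list() expects: addr0 | addr1 | ... with no gaps. */
static int demo_pack_mc_list(const unsigned char addrs[][DEMO_ETH_ALEN],
			     int count, unsigned char *mta_list)
{
	int i;

	for (i = 0; i < count; i++)
		memcpy(mta_list + i * DEMO_ETH_ALEN, addrs[i], DEMO_ETH_ALEN);

	return count;	/* number of addresses written, mirroring the driver */
}
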
/**
 * igb_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int igb_write_uc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > rar_entries)
		return -ENOMEM;

	if (!netdev_uc_empty(netdev) && rar_entries) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			if (!rar_entries)
				break;
			igb_rar_set_qsel(adapter, ha->addr,
					 rar_entries--,
					 vfn);
			count++;
		}
	}
	/* write the addresses in reverse order to avoid write combining */
	for (; rar_entries > 0; rar_entries--) {
		wr32(E1000_RAH(rar_entries), 0);
		wr32(E1000_RAL(rar_entries), 0);
	}
	wrfl();

	return count;
}

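The unicast path fills the receive address registers from the top of the table down, after reserving one entry per VF plus entry 0 for the PF MAC. A small worked example of that budget, using an assumed register count (the real value comes from hw->mac.rar_entry_count):

/* Illustrative only: with 24 RAR entries, 7 VFs and the PF address in
 * entry 0, 24 - (7 + 1) = 16 entries remain for secondary unicast
 * addresses; a 17th address makes igb_write_uc_addr_list() return
 * -ENOMEM and the caller falls back to unicast promiscuous mode. */
static unsigned int demo_uc_rar_budget(unsigned int rar_entry_count,
				       unsigned int vfs_allocated_count)
{
	return rar_entry_count - (vfs_allocated_count + 1);
}
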
/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl, vmolr = 0;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	rctl = rd32(E1000_RCTL);

	/* clear the affected bits */
	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/*
			 * Write addresses to the MTA; if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
		/*
		 * Write addresses to available RAR registers; if there is not
		 * sufficient space to store all the addresses then enable
		 * unicast promiscuous mode
		 */
		count = igb_write_uc_addr_list(netdev);
		if (count < 0) {
			rctl |= E1000_RCTL_UPE;
			vmolr |= E1000_VMOLR_ROPE;
		}
		rctl |= E1000_RCTL_VFE;
	}
	wr32(E1000_RCTL, rctl);

	/*
	 * In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
		return;

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
	wr32(E1000_VMOLR(vfn), vmolr);
	igb_restore_vf_multicasts(adapter);
}

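The decision cascade above reduces to a small truth table: IFF_PROMISC wins outright, IFF_ALLMULTI forces multicast promiscuous, and otherwise the multicast/unicast list writes decide whether the hardware filters or falls back. A hedged restatement as a pure function (the flag names only mirror the RCTL bits; the encoding is illustrative):

#define DEMO_RX_UPE 0x1		/* unicast promiscuous */
#define DEMO_RX_MPE 0x2		/* multicast promiscuous */

static unsigned int demo_rx_mode_bits(int promisc, int allmulti,
				      int mc_write_failed, int uc_write_failed)
{
	unsigned int bits = 0;

	if (promisc)
		return DEMO_RX_UPE | DEMO_RX_MPE;

	if (allmulti || mc_write_failed)
		bits |= DEMO_RX_MPE;	/* accept all multicast */
	if (uc_write_failed)
		bits |= DEMO_RX_UPE;	/* not enough RAR entries */

	return bits;
}
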
static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (!(wvbr = rd32(E1000_WVBR)))
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & (1 << j) ||
		    adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
			dev_warn(&adapter->pdev->dev,
				 "Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~((1 << j) |
				  (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
		}
	}
}

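WVBR reports spoofed transmissions with two bits per VF: bit j for the VF's first queue pair and bit (j + 8) for its staggered second pair, which is why igb_spoof_check() tests and clears both. A small user-space sketch of that decode, assuming up to 8 VFs:

#include <stdio.h>

#define DEMO_STAGGERED_QUEUE_OFFSET 8

/* Print which VFs a latched WVBR snapshot implicates (illustration only). */
static void demo_decode_wvbr(unsigned int wvbr, int num_vfs)
{
	int j;

	for (j = 0; j < num_vfs; j++) {
		if (wvbr & ((1u << j) |
			    (1u << (j + DEMO_STAGGERED_QUEUE_OFFSET))))
			printf("possible spoof event on VF %d\n", j);
	}
}
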
9d5c8243
AK
3659/* Need to wait a few seconds after link up to get diagnostic information from
3660 * the phy */
3661static void igb_update_phy_info(unsigned long data)
3662{
3663 struct igb_adapter *adapter = (struct igb_adapter *) data;
f5f4cf08 3664 igb_get_phy_info(&adapter->hw);
9d5c8243
AK
3665}
3666
4d6b725e
AD
3667/**
3668 * igb_has_link - check shared code for link and determine up/down
3669 * @adapter: pointer to driver private info
3670 **/
3145535a 3671bool igb_has_link(struct igb_adapter *adapter)
4d6b725e
AD
3672{
3673 struct e1000_hw *hw = &adapter->hw;
3674 bool link_active = false;
3675 s32 ret_val = 0;
3676
3677 /* get_link_status is set on LSC (link status) interrupt or
3678 * rx sequence error interrupt. get_link_status will stay
3679 * false until the e1000_check_for_link establishes link
3680 * for copper adapters ONLY
3681 */
3682 switch (hw->phy.media_type) {
3683 case e1000_media_type_copper:
3684 if (hw->mac.get_link_status) {
3685 ret_val = hw->mac.ops.check_for_link(hw);
3686 link_active = !hw->mac.get_link_status;
3687 } else {
3688 link_active = true;
3689 }
3690 break;
4d6b725e
AD
3691 case e1000_media_type_internal_serdes:
3692 ret_val = hw->mac.ops.check_for_link(hw);
3693 link_active = hw->mac.serdes_has_link;
3694 break;
3695 default:
3696 case e1000_media_type_unknown:
3697 break;
3698 }
3699
3700 return link_active;
3701}
3702
563988dc
SA
3703static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3704{
3705 bool ret = false;
3706 u32 ctrl_ext, thstat;
3707
f96a8a0b 3708 /* check for thermal sensor event on i350 copper only */
563988dc
SA
3709 if (hw->mac.type == e1000_i350) {
3710 thstat = rd32(E1000_THSTAT);
3711 ctrl_ext = rd32(E1000_CTRL_EXT);
3712
3713 if ((hw->phy.media_type == e1000_media_type_copper) &&
3714 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3715 ret = !!(thstat & event);
3716 }
3717 }
3718
3719 return ret;
3720}
3721
9d5c8243
AK
3722/**
3723 * igb_watchdog - Timer Call-back
3724 * @data: pointer to adapter cast into an unsigned long
3725 **/
3726static void igb_watchdog(unsigned long data)
3727{
3728 struct igb_adapter *adapter = (struct igb_adapter *)data;
3729 /* Do the rest outside of interrupt context */
3730 schedule_work(&adapter->watchdog_task);
3731}
3732
3733static void igb_watchdog_task(struct work_struct *work)
3734{
3735 struct igb_adapter *adapter = container_of(work,
559e9c49
AD
3736 struct igb_adapter,
3737 watchdog_task);
9d5c8243 3738 struct e1000_hw *hw = &adapter->hw;
9d5c8243 3739 struct net_device *netdev = adapter->netdev;
563988dc 3740 u32 link;
7a6ea550 3741 int i;
9d5c8243 3742
4d6b725e 3743 link = igb_has_link(adapter);
9d5c8243 3744 if (link) {
749ab2cd
YZ
3745 /* Cancel scheduled suspend requests. */
3746 pm_runtime_resume(netdev->dev.parent);
3747
9d5c8243
AK
3748 if (!netif_carrier_ok(netdev)) {
3749 u32 ctrl;
330a6d6a
AD
3750 hw->mac.ops.get_speed_and_duplex(hw,
3751 &adapter->link_speed,
3752 &adapter->link_duplex);
9d5c8243
AK
3753
3754 ctrl = rd32(E1000_CTRL);
527d47c1 3755 /* Link status message must follow this format */
876d2d6f
JK
3756 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
3757 "Duplex, Flow Control: %s\n",
559e9c49
AD
3758 netdev->name,
3759 adapter->link_speed,
3760 adapter->link_duplex == FULL_DUPLEX ?
876d2d6f
JK
3761 "Full" : "Half",
3762 (ctrl & E1000_CTRL_TFCE) &&
3763 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
3764 (ctrl & E1000_CTRL_RFCE) ? "RX" :
3765 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
9d5c8243 3766
563988dc 3767 /* check for thermal sensor event */
876d2d6f
JK
3768 if (igb_thermal_sensor_event(hw,
3769 E1000_THSTAT_LINK_THROTTLE)) {
3770 netdev_info(netdev, "The network adapter link "
3771 "speed was downshifted because it "
3772 "overheated\n");
7ef5ed1c 3773 }
563988dc 3774
d07f3e37 3775 /* adjust timeout factor according to speed/duplex */
9d5c8243
AK
3776 adapter->tx_timeout_factor = 1;
3777 switch (adapter->link_speed) {
3778 case SPEED_10:
9d5c8243
AK
3779 adapter->tx_timeout_factor = 14;
3780 break;
3781 case SPEED_100:
9d5c8243
AK
3782 /* maybe add some timeout factor ? */
3783 break;
3784 }
3785
3786 netif_carrier_on(netdev);
9d5c8243 3787
4ae196df 3788 igb_ping_all_vfs(adapter);
17dc566c 3789 igb_check_vf_rate_limit(adapter);
4ae196df 3790
4b1a9877 3791 /* link state has changed, schedule phy info update */
9d5c8243
AK
3792 if (!test_bit(__IGB_DOWN, &adapter->state))
3793 mod_timer(&adapter->phy_info_timer,
3794 round_jiffies(jiffies + 2 * HZ));
3795 }
3796 } else {
3797 if (netif_carrier_ok(netdev)) {
3798 adapter->link_speed = 0;
3799 adapter->link_duplex = 0;
563988dc
SA
3800
3801 /* check for thermal sensor event */
876d2d6f
JK
3802 if (igb_thermal_sensor_event(hw,
3803 E1000_THSTAT_PWR_DOWN)) {
3804 netdev_err(netdev, "The network adapter was "
3805 "stopped because it overheated\n");
7ef5ed1c 3806 }
563988dc 3807
527d47c1
AD
3808 /* Link status message must follow this format */
3809 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3810 netdev->name);
9d5c8243 3811 netif_carrier_off(netdev);
4b1a9877 3812
4ae196df
AD
3813 igb_ping_all_vfs(adapter);
3814
4b1a9877 3815 /* link state has changed, schedule phy info update */
9d5c8243
AK
3816 if (!test_bit(__IGB_DOWN, &adapter->state))
3817 mod_timer(&adapter->phy_info_timer,
3818 round_jiffies(jiffies + 2 * HZ));
749ab2cd
YZ
3819
3820 pm_schedule_suspend(netdev->dev.parent,
3821 MSEC_PER_SEC * 5);
9d5c8243
AK
3822 }
3823 }
3824
12dcd86b
ED
3825 spin_lock(&adapter->stats64_lock);
3826 igb_update_stats(adapter, &adapter->stats64);
3827 spin_unlock(&adapter->stats64_lock);
9d5c8243 3828
dbabb065 3829 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 3830 struct igb_ring *tx_ring = adapter->tx_ring[i];
dbabb065 3831 if (!netif_carrier_ok(netdev)) {
9d5c8243
AK
3832 /* We've lost link, so the controller stops DMA,
3833 * but we've got queued Tx work that's never going
3834 * to get done, so reset controller to flush Tx.
3835 * (Do the reset outside of interrupt context). */
dbabb065
AD
3836 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3837 adapter->tx_timeout_count++;
3838 schedule_work(&adapter->reset_task);
3839 /* return immediately since reset is imminent */
3840 return;
3841 }
9d5c8243 3842 }
9d5c8243 3843
dbabb065 3844 /* Force detection of hung controller every watchdog period */
6d095fa8 3845 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
dbabb065 3846 }
f7ba205e 3847
9d5c8243 3848 /* Cause software interrupt to ensure rx ring is cleaned */
7a6ea550 3849 if (adapter->msix_entries) {
047e0030 3850 u32 eics = 0;
0d1ae7f4
AD
3851 for (i = 0; i < adapter->num_q_vectors; i++)
3852 eics |= adapter->q_vector[i]->eims_value;
7a6ea550
AD
3853 wr32(E1000_EICS, eics);
3854 } else {
3855 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3856 }
9d5c8243 3857
13800469
GR
3858 igb_spoof_check(adapter);
3859
9d5c8243
AK
3860 /* Reset the timer */
3861 if (!test_bit(__IGB_DOWN, &adapter->state))
3862 mod_timer(&adapter->watchdog_timer,
3863 round_jiffies(jiffies + 2 * HZ));
3864}
3865
3866enum latency_range {
3867 lowest_latency = 0,
3868 low_latency = 1,
3869 bulk_latency = 2,
3870 latency_invalid = 255
3871};
3872
6eb5a7f1
AD
3873/**
3874 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3875 *
3876 * Stores a new ITR value based strictly on packet size. This
3877 * algorithm is less sophisticated than that used in igb_update_itr,
3878 * due to the difficulty of synchronizing statistics across multiple
eef35c2d 3879 * receive rings. The divisors and thresholds used by this function
6eb5a7f1
AD
3880 * were determined based on theoretical maximum wire speed and testing
3881 * data, in order to minimize response time while increasing bulk
3882 * throughput.
3883 * This functionality is controlled by the InterruptThrottleRate module
3884 * parameter (see igb_param.c)
3885 * NOTE: This function is called only when operating in a multiqueue
3886 * receive environment.
047e0030 3887 * @q_vector: pointer to q_vector
6eb5a7f1 3888 **/
047e0030 3889static void igb_update_ring_itr(struct igb_q_vector *q_vector)
9d5c8243 3890{
047e0030 3891 int new_val = q_vector->itr_val;
6eb5a7f1 3892 int avg_wire_size = 0;
047e0030 3893 struct igb_adapter *adapter = q_vector->adapter;
12dcd86b 3894 unsigned int packets;
9d5c8243 3895
6eb5a7f1
AD
3896 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3897 * ints/sec - ITR timer value of 120 ticks.
3898 */
3899 if (adapter->link_speed != SPEED_1000) {
0ba82994 3900 new_val = IGB_4K_ITR;
6eb5a7f1 3901 goto set_itr_val;
9d5c8243 3902 }
047e0030 3903
0ba82994
AD
3904 packets = q_vector->rx.total_packets;
3905 if (packets)
3906 avg_wire_size = q_vector->rx.total_bytes / packets;
047e0030 3907
0ba82994
AD
3908 packets = q_vector->tx.total_packets;
3909 if (packets)
3910 avg_wire_size = max_t(u32, avg_wire_size,
3911 q_vector->tx.total_bytes / packets);
047e0030
AD
3912
3913 /* if avg_wire_size isn't set no work was done */
3914 if (!avg_wire_size)
3915 goto clear_counts;
9d5c8243 3916
6eb5a7f1
AD
3917 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3918 avg_wire_size += 24;
3919
3920 /* Don't starve jumbo frames */
3921 avg_wire_size = min(avg_wire_size, 3000);
9d5c8243 3922
6eb5a7f1
AD
3923 /* Give a little boost to mid-size frames */
3924 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3925 new_val = avg_wire_size / 3;
3926 else
3927 new_val = avg_wire_size / 2;
9d5c8243 3928
0ba82994
AD
3929 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3930 if (new_val < IGB_20K_ITR &&
3931 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3932 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3933 new_val = IGB_20K_ITR;
abe1c363 3934
6eb5a7f1 3935set_itr_val:
047e0030
AD
3936 if (new_val != q_vector->itr_val) {
3937 q_vector->itr_val = new_val;
3938 q_vector->set_itr = 1;
9d5c8243 3939 }
6eb5a7f1 3940clear_counts:
0ba82994
AD
3941 q_vector->rx.total_bytes = 0;
3942 q_vector->rx.total_packets = 0;
3943 q_vector->tx.total_bytes = 0;
3944 q_vector->tx.total_packets = 0;
9d5c8243
AK
3945}
3946
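As a concrete walk-through of the heuristic above: suppose a polling interval moved 20 packets totalling 30,000 bytes on Rx and nothing on Tx. Then avg_wire_size = 1500, adding 24 for CRC/preamble/inter-frame gap gives 1524, the 3000-byte clamp does not trigger, and since 1524 lies outside the 300-1200 "mid-size" window the new value is 1524 / 2 = 762 in EITR units (the driver then applies the IGB_20K_ITR floor when conservative moderation is configured). A hedged restatement of just that arithmetic, not driver code:

/* Illustrative re-statement of the avg_wire_size -> ITR mapping used by
 * igb_update_ring_itr(); values are EITR register units, not microseconds. */
static int demo_ring_itr_from_traffic(unsigned int bytes, unsigned int packets)
{
	unsigned int avg_wire_size;

	if (!packets)
		return -1;		/* no work done, keep the old value */

	avg_wire_size = bytes / packets;
	avg_wire_size += 24;		/* CRC, preamble and inter-frame gap */
	if (avg_wire_size > 3000)
		avg_wire_size = 3000;	/* don't starve jumbo frames */

	if (avg_wire_size > 300 && avg_wire_size < 1200)
		return avg_wire_size / 3;	/* boost mid-size frames */

	return avg_wire_size / 2;
}
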
3947/**
3948 * igb_update_itr - update the dynamic ITR value based on statistics
3949 * Stores a new ITR value based on packets and byte
3950 * counts during the last interrupt. The advantage of per interrupt
3951 * computation is faster updates and more accurate ITR for the current
3952 * traffic pattern. Constants in this function were computed
3953 * based on theoretical maximum wire speed and thresholds were set based
3954 * on testing data as well as attempting to minimize response time
3955 * while increasing bulk throughput.
3956 * this functionality is controlled by the InterruptThrottleRate module
3957 * parameter (see igb_param.c)
3958 * NOTE: These calculations are only valid when operating in a single-
3959 * queue environment.
0ba82994
AD
3960 * @q_vector: pointer to q_vector
3961 * @ring_container: ring info to update the itr for
9d5c8243 3962 **/
0ba82994
AD
3963static void igb_update_itr(struct igb_q_vector *q_vector,
3964 struct igb_ring_container *ring_container)
9d5c8243 3965{
0ba82994
AD
3966 unsigned int packets = ring_container->total_packets;
3967 unsigned int bytes = ring_container->total_bytes;
3968 u8 itrval = ring_container->itr;
9d5c8243 3969
0ba82994 3970 /* no packets, exit with status unchanged */
9d5c8243 3971 if (packets == 0)
0ba82994 3972 return;
9d5c8243 3973
0ba82994 3974 switch (itrval) {
9d5c8243
AK
3975 case lowest_latency:
3976 /* handle TSO and jumbo frames */
3977 if (bytes/packets > 8000)
0ba82994 3978 itrval = bulk_latency;
9d5c8243 3979 else if ((packets < 5) && (bytes > 512))
0ba82994 3980 itrval = low_latency;
9d5c8243
AK
3981 break;
3982 case low_latency: /* 50 usec aka 20000 ints/s */
3983 if (bytes > 10000) {
3984 /* this if handles the TSO accounting */
3985 if (bytes/packets > 8000) {
0ba82994 3986 itrval = bulk_latency;
9d5c8243 3987 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
0ba82994 3988 itrval = bulk_latency;
9d5c8243 3989 } else if ((packets > 35)) {
0ba82994 3990 itrval = lowest_latency;
9d5c8243
AK
3991 }
3992 } else if (bytes/packets > 2000) {
0ba82994 3993 itrval = bulk_latency;
9d5c8243 3994 } else if (packets <= 2 && bytes < 512) {
0ba82994 3995 itrval = lowest_latency;
9d5c8243
AK
3996 }
3997 break;
3998 case bulk_latency: /* 250 usec aka 4000 ints/s */
3999 if (bytes > 25000) {
4000 if (packets > 35)
0ba82994 4001 itrval = low_latency;
1e5c3d21 4002 } else if (bytes < 1500) {
0ba82994 4003 itrval = low_latency;
9d5c8243
AK
4004 }
4005 break;
4006 }
4007
0ba82994
AD
4008 /* clear work counters since we have the values we need */
4009 ring_container->total_bytes = 0;
4010 ring_container->total_packets = 0;
4011
4012 /* write updated itr to ring container */
4013 ring_container->itr = itrval;
9d5c8243
AK
4014}
4015
0ba82994 4016static void igb_set_itr(struct igb_q_vector *q_vector)
9d5c8243 4017{
0ba82994 4018 struct igb_adapter *adapter = q_vector->adapter;
047e0030 4019 u32 new_itr = q_vector->itr_val;
0ba82994 4020 u8 current_itr = 0;
9d5c8243
AK
4021
4022 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
4023 if (adapter->link_speed != SPEED_1000) {
4024 current_itr = 0;
0ba82994 4025 new_itr = IGB_4K_ITR;
9d5c8243
AK
4026 goto set_itr_now;
4027 }
4028
0ba82994
AD
4029 igb_update_itr(q_vector, &q_vector->tx);
4030 igb_update_itr(q_vector, &q_vector->rx);
9d5c8243 4031
0ba82994 4032 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
9d5c8243 4033
6eb5a7f1 4034 /* conservative mode (itr 3) eliminates the lowest_latency setting */
0ba82994
AD
4035 if (current_itr == lowest_latency &&
4036 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4037 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
6eb5a7f1
AD
4038 current_itr = low_latency;
4039
9d5c8243
AK
4040 switch (current_itr) {
4041 /* counts and packets in update_itr are dependent on these numbers */
4042 case lowest_latency:
0ba82994 4043 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
9d5c8243
AK
4044 break;
4045 case low_latency:
0ba82994 4046 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
9d5c8243
AK
4047 break;
4048 case bulk_latency:
0ba82994 4049 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
9d5c8243
AK
4050 break;
4051 default:
4052 break;
4053 }
4054
4055set_itr_now:
047e0030 4056 if (new_itr != q_vector->itr_val) {
9d5c8243
AK
4057 /* this attempts to bias the interrupt rate towards Bulk
4058 * by adding intermediate steps when interrupt rate is
4059 * increasing */
047e0030
AD
4060 new_itr = new_itr > q_vector->itr_val ?
4061 max((new_itr * q_vector->itr_val) /
4062 (new_itr + (q_vector->itr_val >> 2)),
0ba82994 4063 new_itr) :
9d5c8243
AK
4064 new_itr;
4065 /* Don't write the value here; it resets the adapter's
4066 * internal timer, and causes us to delay far longer than
4067 * we should between interrupts. Instead, we write the ITR
4068 * value at the beginning of the next interrupt so the timing
4069 * ends up being correct.
4070 */
047e0030
AD
4071 q_vector->itr_val = new_itr;
4072 q_vector->set_itr = 1;
9d5c8243 4073 }
9d5c8243
AK
4074}
4075
c50b52a0
SH
4076static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4077 u32 type_tucmd, u32 mss_l4len_idx)
7d13a7d0
AD
4078{
4079 struct e1000_adv_tx_context_desc *context_desc;
4080 u16 i = tx_ring->next_to_use;
4081
4082 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4083
4084 i++;
4085 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4086
4087 /* set bits to identify this as an advanced context descriptor */
4088 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4089
4090 /* For 82575, context index must be unique per ring. */
866cff06 4091 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
7d13a7d0
AD
4092 mss_l4len_idx |= tx_ring->reg_idx << 4;
4093
4094 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4095 context_desc->seqnum_seed = 0;
4096 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4097 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4098}
4099
7af40ad9
AD
4100static int igb_tso(struct igb_ring *tx_ring,
4101 struct igb_tx_buffer *first,
4102 u8 *hdr_len)
9d5c8243 4103{
7af40ad9 4104 struct sk_buff *skb = first->skb;
7d13a7d0
AD
4105 u32 vlan_macip_lens, type_tucmd;
4106 u32 mss_l4len_idx, l4len;
4107
4108 if (!skb_is_gso(skb))
4109 return 0;
9d5c8243
AK
4110
4111 if (skb_header_cloned(skb)) {
7af40ad9 4112 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
9d5c8243
AK
4113 if (err)
4114 return err;
4115 }
4116
7d13a7d0
AD
4117 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4118 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
9d5c8243 4119
7af40ad9 4120 if (first->protocol == __constant_htons(ETH_P_IP)) {
9d5c8243
AK
4121 struct iphdr *iph = ip_hdr(skb);
4122 iph->tot_len = 0;
4123 iph->check = 0;
4124 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4125 iph->daddr, 0,
4126 IPPROTO_TCP,
4127 0);
7d13a7d0 4128 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
7af40ad9
AD
4129 first->tx_flags |= IGB_TX_FLAGS_TSO |
4130 IGB_TX_FLAGS_CSUM |
4131 IGB_TX_FLAGS_IPV4;
8e1e8a47 4132 } else if (skb_is_gso_v6(skb)) {
9d5c8243
AK
4133 ipv6_hdr(skb)->payload_len = 0;
4134 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4135 &ipv6_hdr(skb)->daddr,
4136 0, IPPROTO_TCP, 0);
7af40ad9
AD
4137 first->tx_flags |= IGB_TX_FLAGS_TSO |
4138 IGB_TX_FLAGS_CSUM;
9d5c8243
AK
4139 }
4140
7af40ad9 4141 /* compute header lengths */
7d13a7d0
AD
4142 l4len = tcp_hdrlen(skb);
4143 *hdr_len = skb_transport_offset(skb) + l4len;
9d5c8243 4144
7af40ad9
AD
4145 /* update gso size and bytecount with header size */
4146 first->gso_segs = skb_shinfo(skb)->gso_segs;
4147 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4148
9d5c8243 4149 /* MSS L4LEN IDX */
7d13a7d0
AD
4150 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4151 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
9d5c8243 4152
7d13a7d0
AD
4153 /* VLAN MACLEN IPLEN */
4154 vlan_macip_lens = skb_network_header_len(skb);
4155 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
7af40ad9 4156 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
9d5c8243 4157
7d13a7d0 4158 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
9d5c8243 4159
7d13a7d0 4160 return 1;
9d5c8243
AK
4161}
4162
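For TSO the advanced context descriptor carries the header geometry as packed fields: IPLEN and MACLEN share vlan_macip_lens, while L4LEN and the MSS share mss_l4len_idx. A worked packing for an assumed untagged IPv4/TCP frame (14-byte MAC header, 20-byte IP header, 20-byte TCP header, MSS 1448); the shift amounts below are taken to match the E1000_ADVTXD_* constants used above (MACLEN 9, L4LEN 8, MSS 16):

/* Illustration of the two context-descriptor words built by igb_tso(). */
static void demo_pack_tso_context(unsigned int mac_len, unsigned int ip_len,
				  unsigned int tcp_len, unsigned int mss,
				  unsigned int *vlan_macip_lens,
				  unsigned int *mss_l4len_idx)
{
	/* e.g. 20 | (14 << 9) for a plain Ethernet + IPv4 header */
	*vlan_macip_lens = ip_len | (mac_len << 9);
	/* e.g. (20 << 8) | (1448 << 16) for a 20-byte TCP header, MSS 1448 */
	*mss_l4len_idx = (tcp_len << 8) | (mss << 16);
}
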
7af40ad9 4163static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
9d5c8243 4164{
7af40ad9 4165 struct sk_buff *skb = first->skb;
7d13a7d0
AD
4166 u32 vlan_macip_lens = 0;
4167 u32 mss_l4len_idx = 0;
4168 u32 type_tucmd = 0;
9d5c8243 4169
7d13a7d0 4170 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7af40ad9
AD
4171 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4172 return;
7d13a7d0
AD
4173 } else {
4174 u8 l4_hdr = 0;
7af40ad9 4175 switch (first->protocol) {
7d13a7d0
AD
4176 case __constant_htons(ETH_P_IP):
4177 vlan_macip_lens |= skb_network_header_len(skb);
4178 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4179 l4_hdr = ip_hdr(skb)->protocol;
4180 break;
4181 case __constant_htons(ETH_P_IPV6):
4182 vlan_macip_lens |= skb_network_header_len(skb);
4183 l4_hdr = ipv6_hdr(skb)->nexthdr;
4184 break;
4185 default:
4186 if (unlikely(net_ratelimit())) {
4187 dev_warn(tx_ring->dev,
4188 "partial checksum but proto=%x!\n",
7af40ad9 4189 first->protocol);
fa4a7ef3 4190 }
7d13a7d0
AD
4191 break;
4192 }
fa4a7ef3 4193
7d13a7d0
AD
4194 switch (l4_hdr) {
4195 case IPPROTO_TCP:
4196 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4197 mss_l4len_idx = tcp_hdrlen(skb) <<
4198 E1000_ADVTXD_L4LEN_SHIFT;
4199 break;
4200 case IPPROTO_SCTP:
4201 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4202 mss_l4len_idx = sizeof(struct sctphdr) <<
4203 E1000_ADVTXD_L4LEN_SHIFT;
4204 break;
4205 case IPPROTO_UDP:
4206 mss_l4len_idx = sizeof(struct udphdr) <<
4207 E1000_ADVTXD_L4LEN_SHIFT;
4208 break;
4209 default:
4210 if (unlikely(net_ratelimit())) {
4211 dev_warn(tx_ring->dev,
4212 "partial checksum but l4 proto=%x!\n",
4213 l4_hdr);
44b0cda3 4214 }
7d13a7d0 4215 break;
9d5c8243 4216 }
7af40ad9
AD
4217
4218 /* update TX checksum flag */
4219 first->tx_flags |= IGB_TX_FLAGS_CSUM;
7d13a7d0 4220 }
9d5c8243 4221
7d13a7d0 4222 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
7af40ad9 4223 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
9d5c8243 4224
7d13a7d0 4225 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
9d5c8243
AK
4226}
4227
e032afc8
AD
4228static __le32 igb_tx_cmd_type(u32 tx_flags)
4229{
4230 /* set type for advanced descriptor with frame checksum insertion */
4231 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4232 E1000_ADVTXD_DCMD_IFCS |
4233 E1000_ADVTXD_DCMD_DEXT);
4234
4235 /* set HW vlan bit if vlan is present */
4236 if (tx_flags & IGB_TX_FLAGS_VLAN)
4237 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4238
3c89f6d0 4239#ifdef CONFIG_IGB_PTP
e032afc8 4240 /* set timestamp bit if present */
1f6e8178 4241 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
e032afc8 4242 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
3c89f6d0 4243#endif /* CONFIG_IGB_PTP */
e032afc8
AD
4244
4245 /* set segmentation bits for TSO */
4246 if (tx_flags & IGB_TX_FLAGS_TSO)
4247 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4248
4249 return cmd_type;
4250}
4251
7af40ad9
AD
4252static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4253 union e1000_adv_tx_desc *tx_desc,
4254 u32 tx_flags, unsigned int paylen)
e032afc8
AD
4255{
4256 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4257
4258 /* 82575 requires a unique index per ring if any offload is enabled */
4259 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
866cff06 4260 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
e032afc8
AD
4261 olinfo_status |= tx_ring->reg_idx << 4;
4262
4263 /* insert L4 checksum */
4264 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4265 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4266
4267 /* insert IPv4 checksum */
4268 if (tx_flags & IGB_TX_FLAGS_IPV4)
4269 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4270 }
4271
7af40ad9 4272 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
e032afc8
AD
4273}
4274
ebe42d16
AD
4275/*
4276 * The largest size we can write to the descriptor is 65535. In order to
4277 * maintain a power of two alignment we have to limit ourselves to 32K.
4278 */
4279#define IGB_MAX_TXD_PWR 15
7af40ad9 4280#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
9d5c8243 4281
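Because of that 32K-per-descriptor cap, igb_tx_map() below emits ceil(len / 32768) data descriptors for any single mapped buffer: a 45,000-byte fragment, for example, becomes one 32,768-byte descriptor plus one 12,232-byte descriptor. A one-line sketch of that count:

/* Descriptors needed for one DMA-mapped buffer of 'len' bytes (sketch). */
static unsigned int demo_txd_count_for_len(unsigned int len)
{
	return (len + IGB_MAX_DATA_PER_TXD - 1) / IGB_MAX_DATA_PER_TXD;
}
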
7af40ad9
AD
4282static void igb_tx_map(struct igb_ring *tx_ring,
4283 struct igb_tx_buffer *first,
ebe42d16 4284 const u8 hdr_len)
9d5c8243 4285{
7af40ad9 4286 struct sk_buff *skb = first->skb;
ebe42d16
AD
4287 struct igb_tx_buffer *tx_buffer_info;
4288 union e1000_adv_tx_desc *tx_desc;
4289 dma_addr_t dma;
4290 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4291 unsigned int data_len = skb->data_len;
4292 unsigned int size = skb_headlen(skb);
4293 unsigned int paylen = skb->len - hdr_len;
4294 __le32 cmd_type;
7af40ad9 4295 u32 tx_flags = first->tx_flags;
ebe42d16 4296 u16 i = tx_ring->next_to_use;
ebe42d16
AD
4297
4298 tx_desc = IGB_TX_DESC(tx_ring, i);
4299
7af40ad9 4300 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
ebe42d16
AD
4301 cmd_type = igb_tx_cmd_type(tx_flags);
4302
4303 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4304 if (dma_mapping_error(tx_ring->dev, dma))
6366ad33 4305 goto dma_error;
9d5c8243 4306
ebe42d16
AD
4307 /* record length, and DMA address */
4308 first->length = size;
4309 first->dma = dma;
ebe42d16
AD
4310 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4311
4312 for (;;) {
4313 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4314 tx_desc->read.cmd_type_len =
4315 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
4316
4317 i++;
4318 tx_desc++;
4319 if (i == tx_ring->count) {
4320 tx_desc = IGB_TX_DESC(tx_ring, 0);
4321 i = 0;
4322 }
4323
4324 dma += IGB_MAX_DATA_PER_TXD;
4325 size -= IGB_MAX_DATA_PER_TXD;
4326
4327 tx_desc->read.olinfo_status = 0;
4328 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4329 }
4330
4331 if (likely(!data_len))
4332 break;
2bbfebe2 4333
ebe42d16 4334 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
9d5c8243 4335
65689fef 4336 i++;
ebe42d16
AD
4337 tx_desc++;
4338 if (i == tx_ring->count) {
4339 tx_desc = IGB_TX_DESC(tx_ring, 0);
65689fef 4340 i = 0;
ebe42d16 4341 }
65689fef 4342
9e903e08 4343 size = skb_frag_size(frag);
ebe42d16
AD
4344 data_len -= size;
4345
4346 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4347 size, DMA_TO_DEVICE);
4348 if (dma_mapping_error(tx_ring->dev, dma))
6366ad33
AD
4349 goto dma_error;
4350
ebe42d16
AD
4351 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4352 tx_buffer_info->length = size;
4353 tx_buffer_info->dma = dma;
4354
4355 tx_desc->read.olinfo_status = 0;
4356 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4357
4358 frag++;
9d5c8243
AK
4359 }
4360
bdbc0631
ED
4361 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4362
ebe42d16
AD
4363 /* write last descriptor with RS and EOP bits */
4364 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
6b8f0922
BG
4365 if (unlikely(skb->no_fcs))
4366 cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
ebe42d16 4367 tx_desc->read.cmd_type_len = cmd_type;
8542db05
AD
4368
4369 /* set the timestamp */
4370 first->time_stamp = jiffies;
4371
ebe42d16
AD
4372 /*
4373 * Force memory writes to complete before letting h/w know there
4374 * are new descriptors to fetch. (Only applicable for weak-ordered
4375 * memory model archs, such as IA-64).
4376 *
4377 * We also need this memory barrier to make certain all of the
4378 * status bits have been updated before next_to_watch is written.
4379 */
4380 wmb();
4381
8542db05 4382 /* set next_to_watch value indicating a packet is present */
ebe42d16 4383 first->next_to_watch = tx_desc;
9d5c8243 4384
ebe42d16
AD
4385 i++;
4386 if (i == tx_ring->count)
4387 i = 0;
6366ad33 4388
ebe42d16 4389 tx_ring->next_to_use = i;
6366ad33 4390
ebe42d16 4391 writel(i, tx_ring->tail);
6366ad33 4392
ebe42d16
AD
4393 /* we need this if more than one processor can write to our tail
4394 * at a time, it synchronizes IO on IA64/Altix systems */
4395 mmiowb();
4396
4397 return;
4398
4399dma_error:
4400 dev_err(tx_ring->dev, "TX DMA map failed\n");
4401
4402 /* clear dma mappings for failed tx_buffer_info map */
4403 for (;;) {
4404 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4405 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4406 if (tx_buffer_info == first)
4407 break;
a77ff709
NN
4408 if (i == 0)
4409 i = tx_ring->count;
6366ad33 4410 i--;
6366ad33
AD
4411 }
4412
9d5c8243 4413 tx_ring->next_to_use = i;
9d5c8243
AK
4414}
4415
6ad4edfc 4416static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
9d5c8243 4417{
e694e964
AD
4418 struct net_device *netdev = tx_ring->netdev;
4419
661086df 4420 netif_stop_subqueue(netdev, tx_ring->queue_index);
661086df 4421
9d5c8243
AK
4422 /* Herbert's original patch had:
4423 * smp_mb__after_netif_stop_queue();
4424 * but since that doesn't exist yet, just open code it. */
4425 smp_mb();
4426
4427 /* We need to check again in case another CPU has just
4428 * made room available. */
c493ea45 4429 if (igb_desc_unused(tx_ring) < size)
9d5c8243
AK
4430 return -EBUSY;
4431
4432 /* A reprieve! */
661086df 4433 netif_wake_subqueue(netdev, tx_ring->queue_index);
12dcd86b
ED
4434
4435 u64_stats_update_begin(&tx_ring->tx_syncp2);
4436 tx_ring->tx_stats.restart_queue2++;
4437 u64_stats_update_end(&tx_ring->tx_syncp2);
4438
9d5c8243
AK
4439 return 0;
4440}
4441
6ad4edfc 4442static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
9d5c8243 4443{
c493ea45 4444 if (igb_desc_unused(tx_ring) >= size)
9d5c8243 4445 return 0;
e694e964 4446 return __igb_maybe_stop_tx(tx_ring, size);
9d5c8243
AK
4447}
4448
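igb_xmit_frame_ring() below sizes its stop check as nr_frags + 4: one descriptor per fragment page, one for skb->data, one for a possible context descriptor, and two of slack so the tail never touches the head. A frame with three page fragments therefore needs at least seven free descriptors before transmission is attempted; the sketch simply restates that budget:

/* Worst-case descriptor budget checked before queueing a frame (sketch). */
static unsigned int demo_tx_desc_budget(unsigned int nr_frags)
{
	return nr_frags + 4;	/* frags + data + context + 2 gap entries */
}
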
cd392f5c
AD
4449netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4450 struct igb_ring *tx_ring)
9d5c8243 4451{
1f6e8178
MV
4452#ifdef CONFIG_IGB_PTP
4453 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4454#endif /* CONFIG_IGB_PTP */
8542db05 4455 struct igb_tx_buffer *first;
ebe42d16 4456 int tso;
91d4ee33 4457 u32 tx_flags = 0;
31f6adbb 4458 __be16 protocol = vlan_get_protocol(skb);
91d4ee33 4459 u8 hdr_len = 0;
9d5c8243 4460
9d5c8243
AK
4461 /* need: 1 descriptor per page,
4462 * + 2 desc gap to keep tail from touching head,
4463 * + 1 desc for skb->data,
4464 * + 1 desc for context descriptor,
4465 * otherwise try next time */
e694e964 4466 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
9d5c8243 4467 /* this is a hard error */
9d5c8243
AK
4468 return NETDEV_TX_BUSY;
4469 }
33af6bcc 4470
7af40ad9
AD
4471 /* record the location of the first descriptor for this packet */
4472 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4473 first->skb = skb;
4474 first->bytecount = skb->len;
4475 first->gso_segs = 1;
4476
3c89f6d0 4477#ifdef CONFIG_IGB_PTP
1f6e8178
MV
4478 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4479 !(adapter->ptp_tx_skb))) {
2244d07b 4480 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
33af6bcc 4481 tx_flags |= IGB_TX_FLAGS_TSTAMP;
1f6e8178
MV
4482
4483 adapter->ptp_tx_skb = skb_get(skb);
4484 if (adapter->hw.mac.type == e1000_82576)
4485 schedule_work(&adapter->ptp_tx_work);
33af6bcc 4486 }
3c89f6d0 4487#endif /* CONFIG_IGB_PTP */
9d5c8243 4488
eab6d18d 4489 if (vlan_tx_tag_present(skb)) {
9d5c8243
AK
4490 tx_flags |= IGB_TX_FLAGS_VLAN;
4491 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4492 }
4493
7af40ad9
AD
4494 /* record initial flags and protocol */
4495 first->tx_flags = tx_flags;
4496 first->protocol = protocol;
cdfd01fc 4497
7af40ad9
AD
4498 tso = igb_tso(tx_ring, first, &hdr_len);
4499 if (tso < 0)
7d13a7d0 4500 goto out_drop;
7af40ad9
AD
4501 else if (!tso)
4502 igb_tx_csum(tx_ring, first);
9d5c8243 4503
7af40ad9 4504 igb_tx_map(tx_ring, first, hdr_len);
85ad76b2
AD
4505
4506 /* Make sure there is space in the ring for the next send. */
e694e964 4507 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
85ad76b2 4508
9d5c8243 4509 return NETDEV_TX_OK;
7d13a7d0
AD
4510
4511out_drop:
7af40ad9
AD
4512 igb_unmap_and_free_tx_resource(tx_ring, first);
4513
7d13a7d0 4514 return NETDEV_TX_OK;
9d5c8243
AK
4515}
4516
1cc3bd87
AD
4517static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4518 struct sk_buff *skb)
4519{
4520 unsigned int r_idx = skb->queue_mapping;
4521
4522 if (r_idx >= adapter->num_tx_queues)
4523 r_idx = r_idx % adapter->num_tx_queues;
4524
4525 return adapter->tx_ring[r_idx];
4526}
4527
cd392f5c
AD
4528static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4529 struct net_device *netdev)
9d5c8243
AK
4530{
4531 struct igb_adapter *adapter = netdev_priv(netdev);
b1a436c3
AD
4532
4533 if (test_bit(__IGB_DOWN, &adapter->state)) {
4534 dev_kfree_skb_any(skb);
4535 return NETDEV_TX_OK;
4536 }
4537
4538 if (skb->len <= 0) {
4539 dev_kfree_skb_any(skb);
4540 return NETDEV_TX_OK;
4541 }
4542
1cc3bd87
AD
4543 /*
4544 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4545 * in order to meet this minimum size requirement.
4546 */
4547 if (skb->len < 17) {
4548 if (skb_padto(skb, 17))
4549 return NETDEV_TX_OK;
4550 skb->len = 17;
4551 }
9d5c8243 4552
1cc3bd87 4553 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
9d5c8243
AK
4554}
4555
4556/**
4557 * igb_tx_timeout - Respond to a Tx Hang
4558 * @netdev: network interface device structure
4559 **/
4560static void igb_tx_timeout(struct net_device *netdev)
4561{
4562 struct igb_adapter *adapter = netdev_priv(netdev);
4563 struct e1000_hw *hw = &adapter->hw;
4564
4565 /* Do the reset outside of interrupt context */
4566 adapter->tx_timeout_count++;
f7ba205e 4567
06218a8d 4568 if (hw->mac.type >= e1000_82580)
55cac248
AD
4569 hw->dev_spec._82575.global_device_reset = true;
4570
9d5c8243 4571 schedule_work(&adapter->reset_task);
265de409
AD
4572 wr32(E1000_EICS,
4573 (adapter->eims_enable_mask & ~adapter->eims_other));
9d5c8243
AK
4574}
4575
4576static void igb_reset_task(struct work_struct *work)
4577{
4578 struct igb_adapter *adapter;
4579 adapter = container_of(work, struct igb_adapter, reset_task);
4580
c97ec42a
TI
4581 igb_dump(adapter);
4582 netdev_err(adapter->netdev, "Reset adapter\n");
9d5c8243
AK
4583 igb_reinit_locked(adapter);
4584}
4585
4586/**
12dcd86b 4587 * igb_get_stats64 - Get System Network Statistics
9d5c8243 4588 * @netdev: network interface device structure
12dcd86b 4589 * @stats: rtnl_link_stats64 pointer
9d5c8243 4590 *
9d5c8243 4591 **/
12dcd86b
ED
4592static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4593 struct rtnl_link_stats64 *stats)
9d5c8243 4594{
12dcd86b
ED
4595 struct igb_adapter *adapter = netdev_priv(netdev);
4596
4597 spin_lock(&adapter->stats64_lock);
4598 igb_update_stats(adapter, &adapter->stats64);
4599 memcpy(stats, &adapter->stats64, sizeof(*stats));
4600 spin_unlock(&adapter->stats64_lock);
4601
4602 return stats;
9d5c8243
AK
4603}
4604
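igb_update_stats() below folds the per-ring byte and packet counters into the adapter totals using the u64_stats seqcount retry loop, so a 32-bit reader never observes a torn 64-bit value. A minimal reader-side sketch of that pattern with hypothetical ring fields (the driver itself uses u64_stats_fetch_begin_bh/u64_stats_fetch_retry_bh on rx_syncp/tx_syncp):

#include <linux/u64_stats_sync.h>

struct demo_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Snapshot one ring's counters consistently; retry if a writer raced us. */
static void demo_read_ring_stats(struct demo_ring_stats *rs,
				 u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&rs->syncp);
		*packets = rs->packets;
		*bytes = rs->bytes;
	} while (u64_stats_fetch_retry_bh(&rs->syncp, start));
}
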
4605/**
4606 * igb_change_mtu - Change the Maximum Transfer Unit
4607 * @netdev: network interface device structure
4608 * @new_mtu: new value for maximum frame size
4609 *
4610 * Returns 0 on success, negative on failure
4611 **/
4612static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4613{
4614 struct igb_adapter *adapter = netdev_priv(netdev);
090b1795 4615 struct pci_dev *pdev = adapter->pdev;
153285f9 4616 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
9d5c8243 4617
c809d227 4618 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
090b1795 4619 dev_err(&pdev->dev, "Invalid MTU setting\n");
9d5c8243
AK
4620 return -EINVAL;
4621 }
4622
153285f9 4623#define MAX_STD_JUMBO_FRAME_SIZE 9238
9d5c8243 4624 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
090b1795 4625 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
9d5c8243
AK
4626 return -EINVAL;
4627 }
4628
4629 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4630 msleep(1);
73cd78f1 4631
9d5c8243
AK
4632 /* igb_down has a dependency on max_frame_size */
4633 adapter->max_frame_size = max_frame;
559e9c49 4634
4c844851
AD
4635 if (netif_running(netdev))
4636 igb_down(adapter);
9d5c8243 4637
090b1795 4638 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
9d5c8243
AK
4639 netdev->mtu, new_mtu);
4640 netdev->mtu = new_mtu;
4641
4642 if (netif_running(netdev))
4643 igb_up(adapter);
4644 else
4645 igb_reset(adapter);
4646
4647 clear_bit(__IGB_RESETTING, &adapter->state);
4648
4649 return 0;
4650}
4651
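The max_frame arithmetic above adds the Ethernet header, FCS and one VLAN tag to the requested MTU: a 9000-byte MTU, for instance, yields 9000 + 14 + 4 + 4 = 9022, well under the 9238-byte MAX_STD_JUMBO_FRAME_SIZE (which corresponds to the advertised 9216-byte MTU ceiling in the error message). A small restatement that collapses both range checks into the practical limit:

/* Sketch of the igb_change_mtu() frame-size check: 68 is the legacy
 * minimum MTU, and 14 + 4 + 4 is ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN. */
static int demo_mtu_is_acceptable(int new_mtu)
{
	int max_frame = new_mtu + 14 + 4 + 4;

	return new_mtu >= 68 && max_frame <= 9238;
}
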
4652/**
4653 * igb_update_stats - Update the board statistics counters
4654 * @adapter: board private structure
4655 **/
4656
12dcd86b
ED
4657void igb_update_stats(struct igb_adapter *adapter,
4658 struct rtnl_link_stats64 *net_stats)
9d5c8243
AK
4659{
4660 struct e1000_hw *hw = &adapter->hw;
4661 struct pci_dev *pdev = adapter->pdev;
fa3d9a6d 4662 u32 reg, mpc;
9d5c8243 4663 u16 phy_tmp;
3f9c0164
AD
4664 int i;
4665 u64 bytes, packets;
12dcd86b
ED
4666 unsigned int start;
4667 u64 _bytes, _packets;
9d5c8243
AK
4668
4669#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4670
4671 /*
4672 * Prevent stats update while adapter is being reset, or if the pci
4673 * connection is down.
4674 */
4675 if (adapter->link_speed == 0)
4676 return;
4677 if (pci_channel_offline(pdev))
4678 return;
4679
3f9c0164
AD
4680 bytes = 0;
4681 packets = 0;
4682 for (i = 0; i < adapter->num_rx_queues; i++) {
ae1c07a6 4683 u32 rqdpc = rd32(E1000_RQDPC(i));
3025a446 4684 struct igb_ring *ring = adapter->rx_ring[i];
12dcd86b 4685
ae1c07a6
AD
4686 if (rqdpc) {
4687 ring->rx_stats.drops += rqdpc;
4688 net_stats->rx_fifo_errors += rqdpc;
4689 }
12dcd86b
ED
4690
4691 do {
4692 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4693 _bytes = ring->rx_stats.bytes;
4694 _packets = ring->rx_stats.packets;
4695 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4696 bytes += _bytes;
4697 packets += _packets;
3f9c0164
AD
4698 }
4699
128e45eb
AD
4700 net_stats->rx_bytes = bytes;
4701 net_stats->rx_packets = packets;
3f9c0164
AD
4702
4703 bytes = 0;
4704 packets = 0;
4705 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 4706 struct igb_ring *ring = adapter->tx_ring[i];
12dcd86b
ED
4707 do {
4708 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4709 _bytes = ring->tx_stats.bytes;
4710 _packets = ring->tx_stats.packets;
4711 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4712 bytes += _bytes;
4713 packets += _packets;
3f9c0164 4714 }
128e45eb
AD
4715 net_stats->tx_bytes = bytes;
4716 net_stats->tx_packets = packets;
3f9c0164
AD
4717
4718 /* read stats registers */
9d5c8243
AK
4719 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4720 adapter->stats.gprc += rd32(E1000_GPRC);
4721 adapter->stats.gorc += rd32(E1000_GORCL);
4722 rd32(E1000_GORCH); /* clear GORCL */
4723 adapter->stats.bprc += rd32(E1000_BPRC);
4724 adapter->stats.mprc += rd32(E1000_MPRC);
4725 adapter->stats.roc += rd32(E1000_ROC);
4726
4727 adapter->stats.prc64 += rd32(E1000_PRC64);
4728 adapter->stats.prc127 += rd32(E1000_PRC127);
4729 adapter->stats.prc255 += rd32(E1000_PRC255);
4730 adapter->stats.prc511 += rd32(E1000_PRC511);
4731 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4732 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4733 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4734 adapter->stats.sec += rd32(E1000_SEC);
4735
fa3d9a6d
MW
4736 mpc = rd32(E1000_MPC);
4737 adapter->stats.mpc += mpc;
4738 net_stats->rx_fifo_errors += mpc;
9d5c8243
AK
4739 adapter->stats.scc += rd32(E1000_SCC);
4740 adapter->stats.ecol += rd32(E1000_ECOL);
4741 adapter->stats.mcc += rd32(E1000_MCC);
4742 adapter->stats.latecol += rd32(E1000_LATECOL);
4743 adapter->stats.dc += rd32(E1000_DC);
4744 adapter->stats.rlec += rd32(E1000_RLEC);
4745 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4746 adapter->stats.xontxc += rd32(E1000_XONTXC);
4747 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4748 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4749 adapter->stats.fcruc += rd32(E1000_FCRUC);
4750 adapter->stats.gptc += rd32(E1000_GPTC);
4751 adapter->stats.gotc += rd32(E1000_GOTCL);
4752 rd32(E1000_GOTCH); /* clear GOTCL */
fa3d9a6d 4753 adapter->stats.rnbc += rd32(E1000_RNBC);
9d5c8243
AK
4754 adapter->stats.ruc += rd32(E1000_RUC);
4755 adapter->stats.rfc += rd32(E1000_RFC);
4756 adapter->stats.rjc += rd32(E1000_RJC);
4757 adapter->stats.tor += rd32(E1000_TORH);
4758 adapter->stats.tot += rd32(E1000_TOTH);
4759 adapter->stats.tpr += rd32(E1000_TPR);
4760
4761 adapter->stats.ptc64 += rd32(E1000_PTC64);
4762 adapter->stats.ptc127 += rd32(E1000_PTC127);
4763 adapter->stats.ptc255 += rd32(E1000_PTC255);
4764 adapter->stats.ptc511 += rd32(E1000_PTC511);
4765 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4766 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4767
4768 adapter->stats.mptc += rd32(E1000_MPTC);
4769 adapter->stats.bptc += rd32(E1000_BPTC);
4770
2d0b0f69
NN
4771 adapter->stats.tpt += rd32(E1000_TPT);
4772 adapter->stats.colc += rd32(E1000_COLC);
9d5c8243
AK
4773
4774 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
43915c7c
NN
4775 /* read internal phy specific stats */
4776 reg = rd32(E1000_CTRL_EXT);
4777 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4778 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4779 adapter->stats.tncrs += rd32(E1000_TNCRS);
4780 }
4781
9d5c8243
AK
4782 adapter->stats.tsctc += rd32(E1000_TSCTC);
4783 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4784
4785 adapter->stats.iac += rd32(E1000_IAC);
4786 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4787 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4788 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4789 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4790 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4791 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4792 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4793 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4794
4795 /* Fill out the OS statistics structure */
128e45eb
AD
4796 net_stats->multicast = adapter->stats.mprc;
4797 net_stats->collisions = adapter->stats.colc;
9d5c8243
AK
4798
4799 /* Rx Errors */
4800
4801 /* RLEC on some newer hardware can be incorrect so build
8c0ab70a 4802 * our own version based on RUC and ROC */
128e45eb 4803 net_stats->rx_errors = adapter->stats.rxerrc +
9d5c8243
AK
4804 adapter->stats.crcerrs + adapter->stats.algnerrc +
4805 adapter->stats.ruc + adapter->stats.roc +
4806 adapter->stats.cexterr;
128e45eb
AD
4807 net_stats->rx_length_errors = adapter->stats.ruc +
4808 adapter->stats.roc;
4809 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4810 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4811 net_stats->rx_missed_errors = adapter->stats.mpc;
9d5c8243
AK
4812
4813 /* Tx Errors */
128e45eb
AD
4814 net_stats->tx_errors = adapter->stats.ecol +
4815 adapter->stats.latecol;
4816 net_stats->tx_aborted_errors = adapter->stats.ecol;
4817 net_stats->tx_window_errors = adapter->stats.latecol;
4818 net_stats->tx_carrier_errors = adapter->stats.tncrs;
9d5c8243
AK
4819
4820 /* Tx Dropped needs to be maintained elsewhere */
4821
4822 /* Phy Stats */
4823 if (hw->phy.media_type == e1000_media_type_copper) {
4824 if ((adapter->link_speed == SPEED_1000) &&
73cd78f1 4825 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
9d5c8243
AK
4826 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4827 adapter->phy_stats.idle_errors += phy_tmp;
4828 }
4829 }
4830
4831 /* Management Stats */
4832 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4833 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4834 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
0a915b95
CW
4835
4836 /* OS2BMC Stats */
4837 reg = rd32(E1000_MANC);
4838 if (reg & E1000_MANC_EN_BMC2OS) {
4839 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4840 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4841 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4842 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4843 }
9d5c8243
AK
4844}
4845
9d5c8243
AK
4846static irqreturn_t igb_msix_other(int irq, void *data)
4847{
047e0030 4848 struct igb_adapter *adapter = data;
9d5c8243 4849 struct e1000_hw *hw = &adapter->hw;
844290e5 4850 u32 icr = rd32(E1000_ICR);
844290e5 4851 /* reading ICR causes bit 31 of EICR to be cleared */
dda0e083 4852
7f081d40
AD
4853 if (icr & E1000_ICR_DRSTA)
4854 schedule_work(&adapter->reset_task);
4855
047e0030 4856 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
4857 /* HW is reporting DMA is out of sync */
4858 adapter->stats.doosync++;
13800469
GR
4859 /* The DMA Out of Sync is also indication of a spoof event
4860 * in IOV mode. Check the Wrong VM Behavior register to
4861 * see if it is really a spoof event. */
4862 igb_check_wvbr(adapter);
dda0e083 4863 }
eebbbdba 4864
4ae196df
AD
4865 /* Check for a mailbox event */
4866 if (icr & E1000_ICR_VMMB)
4867 igb_msg_task(adapter);
4868
4869 if (icr & E1000_ICR_LSC) {
4870 hw->mac.get_link_status = 1;
4871 /* guard against interrupt when we're going down */
4872 if (!test_bit(__IGB_DOWN, &adapter->state))
4873 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4874 }
4875
1f6e8178
MV
4876#ifdef CONFIG_IGB_PTP
4877 if (icr & E1000_ICR_TS) {
4878 u32 tsicr = rd32(E1000_TSICR);
4879
4880 if (tsicr & E1000_TSICR_TXTS) {
4881 /* acknowledge the interrupt */
4882 wr32(E1000_TSICR, E1000_TSICR_TXTS);
4883 /* retrieve hardware timestamp */
4884 schedule_work(&adapter->ptp_tx_work);
4885 }
4886 }
4887#endif /* CONFIG_IGB_PTP */
4888
844290e5 4889 wr32(E1000_EIMS, adapter->eims_other);
9d5c8243
AK
4890
4891 return IRQ_HANDLED;
4892}
4893
047e0030 4894static void igb_write_itr(struct igb_q_vector *q_vector)
9d5c8243 4895{
26b39276 4896 struct igb_adapter *adapter = q_vector->adapter;
047e0030 4897 u32 itr_val = q_vector->itr_val & 0x7FFC;
9d5c8243 4898
047e0030
AD
4899 if (!q_vector->set_itr)
4900 return;
73cd78f1 4901
047e0030
AD
4902 if (!itr_val)
4903 itr_val = 0x4;
661086df 4904
26b39276
AD
4905 if (adapter->hw.mac.type == e1000_82575)
4906 itr_val |= itr_val << 16;
661086df 4907 else
0ba82994 4908 itr_val |= E1000_EITR_CNT_IGNR;
661086df 4909
047e0030
AD
4910 writel(itr_val, q_vector->itr_register);
4911 q_vector->set_itr = 0;
6eb5a7f1
AD
4912}
4913
047e0030 4914static irqreturn_t igb_msix_ring(int irq, void *data)
9d5c8243 4915{
047e0030 4916 struct igb_q_vector *q_vector = data;
9d5c8243 4917
047e0030
AD
4918 /* Write the ITR value calculated from the previous interrupt. */
4919 igb_write_itr(q_vector);
9d5c8243 4920
047e0030 4921 napi_schedule(&q_vector->napi);
844290e5 4922
047e0030 4923 return IRQ_HANDLED;
fe4506b6
JC
4924}
4925
421e02f0 4926#ifdef CONFIG_IGB_DCA
047e0030 4927static void igb_update_dca(struct igb_q_vector *q_vector)
fe4506b6 4928{
047e0030 4929 struct igb_adapter *adapter = q_vector->adapter;
fe4506b6
JC
4930 struct e1000_hw *hw = &adapter->hw;
4931 int cpu = get_cpu();
fe4506b6 4932
047e0030
AD
4933 if (q_vector->cpu == cpu)
4934 goto out_no_update;
4935
0ba82994
AD
4936 if (q_vector->tx.ring) {
4937 int q = q_vector->tx.ring->reg_idx;
047e0030
AD
4938 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4939 if (hw->mac.type == e1000_82575) {
4940 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4941 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
2d064c06 4942 } else {
047e0030
AD
4943 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4944 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4945 E1000_DCA_TXCTRL_CPUID_SHIFT;
4946 }
4947 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4948 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4949 }
0ba82994
AD
4950 if (q_vector->rx.ring) {
4951 int q = q_vector->rx.ring->reg_idx;
047e0030
AD
4952 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4953 if (hw->mac.type == e1000_82575) {
2d064c06 4954 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
92be7917 4955 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
047e0030
AD
4956 } else {
4957 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4958 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4959 E1000_DCA_RXCTRL_CPUID_SHIFT;
2d064c06 4960 }
fe4506b6
JC
4961 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4962 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4963 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4964 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
fe4506b6 4965 }
047e0030
AD
4966 q_vector->cpu = cpu;
4967out_no_update:
fe4506b6
JC
4968 put_cpu();
4969}
4970
4971static void igb_setup_dca(struct igb_adapter *adapter)
4972{
7e0e99ef 4973 struct e1000_hw *hw = &adapter->hw;
fe4506b6
JC
4974 int i;
4975
7dfc16fa 4976 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
fe4506b6
JC
4977 return;
4978
7e0e99ef
AD
4979 /* Always use CB2 mode, difference is masked in the CB driver. */
4980 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4981
047e0030 4982 for (i = 0; i < adapter->num_q_vectors; i++) {
26b39276
AD
4983 adapter->q_vector[i]->cpu = -1;
4984 igb_update_dca(adapter->q_vector[i]);
fe4506b6
JC
4985 }
4986}
4987
4988static int __igb_notify_dca(struct device *dev, void *data)
4989{
4990 struct net_device *netdev = dev_get_drvdata(dev);
4991 struct igb_adapter *adapter = netdev_priv(netdev);
090b1795 4992 struct pci_dev *pdev = adapter->pdev;
fe4506b6
JC
4993 struct e1000_hw *hw = &adapter->hw;
4994 unsigned long event = *(unsigned long *)data;
4995
4996 switch (event) {
4997 case DCA_PROVIDER_ADD:
4998 /* if already enabled, don't do it again */
7dfc16fa 4999 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
fe4506b6 5000 break;
fe4506b6 5001 if (dca_add_requester(dev) == 0) {
bbd98fe4 5002 adapter->flags |= IGB_FLAG_DCA_ENABLED;
090b1795 5003 dev_info(&pdev->dev, "DCA enabled\n");
fe4506b6
JC
5004 igb_setup_dca(adapter);
5005 break;
5006 }
5007 /* Fall Through since DCA is disabled. */
5008 case DCA_PROVIDER_REMOVE:
7dfc16fa 5009 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
fe4506b6 5010 /* without this a class_device is left
047e0030 5011 * hanging around in the sysfs model */
fe4506b6 5012 dca_remove_requester(dev);
090b1795 5013 dev_info(&pdev->dev, "DCA disabled\n");
7dfc16fa 5014 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
cbd347ad 5015 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
fe4506b6
JC
5016 }
5017 break;
5018 }
bbd98fe4 5019
fe4506b6 5020 return 0;
9d5c8243
AK
5021}
5022
fe4506b6
JC
5023static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
5024 void *p)
5025{
5026 int ret_val;
5027
5028 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
5029 __igb_notify_dca);
5030
5031 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
5032}
421e02f0 5033#endif /* CONFIG_IGB_DCA */
9d5c8243 5034
0224d663
GR
5035#ifdef CONFIG_PCI_IOV
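/* Assign a freshly generated random MAC address to the given VF. */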
5036static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5037{
5038 unsigned char mac_addr[ETH_ALEN];
0224d663 5039
7efd26d0 5040 eth_random_addr(mac_addr);
0224d663
GR
5041 igb_set_vf_mac(adapter, vf, mac_addr);
5042
f557147c 5043 return 0;
0224d663
GR
5044}
5045
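/* Walk the VF devices belonging to this PF and report whether any of them
 * is currently assigned to a guest and therefore cannot be released.
 */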
f557147c 5046static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
0224d663 5047{
0224d663 5048 struct pci_dev *pdev = adapter->pdev;
f557147c
SA
5049 struct pci_dev *vfdev;
5050 int dev_id;
0224d663
GR
5051
5052 switch (adapter->hw.mac.type) {
5053 case e1000_82576:
f557147c 5054 dev_id = IGB_82576_VF_DEV_ID;
0224d663
GR
5055 break;
5056 case e1000_i350:
f557147c 5057 dev_id = IGB_I350_VF_DEV_ID;
0224d663
GR
5058 break;
5059 default:
f557147c 5060 return false;
0224d663
GR
5061 }
5062
f557147c
SA
5063 /* loop through all the VFs to see if we own any that are assigned */
5064 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
5065 while (vfdev) {
5066 /* if we don't own it we don't care */
5067 if (vfdev->is_virtfn && vfdev->physfn == pdev) {
5068 /* if it is assigned we cannot release it */
5069 if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
0224d663
GR
5070 return true;
5071 }
f557147c
SA
5072
5073 vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
0224d663 5074 }
f557147c 5075
0224d663
GR
5076 return false;
5077}
5078
5079#endif
4ae196df
AD
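/* Send a control ("ping") message to every allocated VF via the mailbox. */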
5080static void igb_ping_all_vfs(struct igb_adapter *adapter)
5081{
5082 struct e1000_hw *hw = &adapter->hw;
5083 u32 ping;
5084 int i;
5085
5086 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
5087 ping = E1000_PF_CONTROL_MSG;
f2ca0dbe 5088 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
4ae196df
AD
5089 ping |= E1000_VT_MSGTYPE_CTS;
5090 igb_write_mbx(hw, &ping, 1, i);
5091 }
5092}
5093
7d5753f0
AD
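/* Handle a VF request to change its promiscuous settings. Only multicast
 * promiscuous mode is honoured; any other flag left in the message is
 * rejected as unsupported.
 */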
5094static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5095{
5096 struct e1000_hw *hw = &adapter->hw;
5097 u32 vmolr = rd32(E1000_VMOLR(vf));
5098 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5099
d85b9004 5100 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
7d5753f0
AD
5101 IGB_VF_FLAG_MULTI_PROMISC);
5102 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5103
5104 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5105 vmolr |= E1000_VMOLR_MPME;
d85b9004 5106 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
7d5753f0
AD
5107 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5108 } else {
5109 /*
5110 * if we have hashes and we are clearing a multicast promisc
5111 * flag we need to write the hashes to the MTA as this step
5112 * was previously skipped
5113 */
5114 if (vf_data->num_vf_mc_hashes > 30) {
5115 vmolr |= E1000_VMOLR_MPME;
5116 } else if (vf_data->num_vf_mc_hashes) {
5117 int j;
5118 vmolr |= E1000_VMOLR_ROMPE;
5119 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5120 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5121 }
5122 }
5123
5124 wr32(E1000_VMOLR(vf), vmolr);
5125
5126 /* there are flags left unprocessed, likely not supported */
5127 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5128 return -EINVAL;
5129
5130 return 0;
5131
5132}
5133
4ae196df
AD
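/* Record the multicast hash list requested by a VF (up to 30 entries) and
 * trigger an MTA update so the new filters take effect.
 */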
5134static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5135 u32 *msgbuf, u32 vf)
5136{
5137 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5138 u16 *hash_list = (u16 *)&msgbuf[1];
5139 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5140 int i;
5141
7d5753f0 5142 /* salt away the number of multicast addresses assigned
4ae196df
AD
 5143 * to this VF for later use to restore when the PF multicast
5144 * list changes
5145 */
5146 vf_data->num_vf_mc_hashes = n;
5147
7d5753f0
AD
5148 /* only up to 30 hash values supported */
5149 if (n > 30)
5150 n = 30;
5151
5152 /* store the hashes for later use */
4ae196df 5153 for (i = 0; i < n; i++)
a419aef8 5154 vf_data->vf_mc_hashes[i] = hash_list[i];
4ae196df
AD
5155
5156 /* Flush and reset the mta with the new values */
ff41f8dc 5157 igb_set_rx_mode(adapter->netdev);
4ae196df
AD
5158
5159 return 0;
5160}
5161
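/* Re-program each VF's stored multicast hashes, or multicast promiscuous
 * mode, after the MTA has been flushed.
 */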
5162static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5163{
5164 struct e1000_hw *hw = &adapter->hw;
5165 struct vf_data_storage *vf_data;
5166 int i, j;
5167
5168 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7d5753f0
AD
5169 u32 vmolr = rd32(E1000_VMOLR(i));
5170 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5171
4ae196df 5172 vf_data = &adapter->vf_data[i];
7d5753f0
AD
5173
5174 if ((vf_data->num_vf_mc_hashes > 30) ||
5175 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5176 vmolr |= E1000_VMOLR_MPME;
5177 } else if (vf_data->num_vf_mc_hashes) {
5178 vmolr |= E1000_VMOLR_ROMPE;
5179 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5180 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5181 }
5182 wr32(E1000_VMOLR(i), vmolr);
4ae196df
AD
5183 }
5184}
5185
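/* Remove the given VF from every VLVF pool entry and drop any VLAN filter
 * whose pool becomes empty as a result.
 */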
5186static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5187{
5188 struct e1000_hw *hw = &adapter->hw;
5189 u32 pool_mask, reg, vid;
5190 int i;
5191
5192 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5193
5194 /* Find the vlan filter for this id */
5195 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5196 reg = rd32(E1000_VLVF(i));
5197
5198 /* remove the vf from the pool */
5199 reg &= ~pool_mask;
5200
5201 /* if pool is empty then remove entry from vfta */
5202 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5203 (reg & E1000_VLVF_VLANID_ENABLE)) {
 5204 vid = reg & E1000_VLVF_VLANID_MASK;
 5205 reg = 0;
5206 igb_vfta_set(hw, vid, false);
5207 }
5208
5209 wr32(E1000_VLVF(i), reg);
5210 }
ae641bdc
AD
5211
5212 adapter->vf_data[vf].vlans_enabled = 0;
4ae196df
AD
5213}
5214
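/* Add or remove a VF's pool bit for a VLAN ID in the VLVF table, claiming
 * or releasing a filter entry and adjusting the VF's maximum receive packet
 * size to account for the VLAN tag.
 */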
5215static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5216{
5217 struct e1000_hw *hw = &adapter->hw;
5218 u32 reg, i;
5219
51466239
AD
5220 /* The vlvf table only exists on 82576 hardware and newer */
5221 if (hw->mac.type < e1000_82576)
5222 return -1;
5223
5224 /* we only need to do this if VMDq is enabled */
4ae196df
AD
5225 if (!adapter->vfs_allocated_count)
5226 return -1;
5227
5228 /* Find the vlan filter for this id */
5229 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5230 reg = rd32(E1000_VLVF(i));
5231 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5232 vid == (reg & E1000_VLVF_VLANID_MASK))
5233 break;
5234 }
5235
5236 if (add) {
5237 if (i == E1000_VLVF_ARRAY_SIZE) {
5238 /* Did not find a matching VLAN ID entry that was
5239 * enabled. Search for a free filter entry, i.e.
5240 * one without the enable bit set
5241 */
5242 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5243 reg = rd32(E1000_VLVF(i));
5244 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5245 break;
5246 }
5247 }
5248 if (i < E1000_VLVF_ARRAY_SIZE) {
5249 /* Found an enabled/available entry */
5250 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5251
5252 /* if !enabled we need to set this up in vfta */
5253 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
51466239
AD
5254 /* add VID to filter table */
5255 igb_vfta_set(hw, vid, true);
4ae196df
AD
5256 reg |= E1000_VLVF_VLANID_ENABLE;
5257 }
cad6d05f
AD
5258 reg &= ~E1000_VLVF_VLANID_MASK;
5259 reg |= vid;
4ae196df 5260 wr32(E1000_VLVF(i), reg);
ae641bdc
AD
5261
5262 /* do not modify RLPML for PF devices */
5263 if (vf >= adapter->vfs_allocated_count)
5264 return 0;
5265
5266 if (!adapter->vf_data[vf].vlans_enabled) {
5267 u32 size;
5268 reg = rd32(E1000_VMOLR(vf));
5269 size = reg & E1000_VMOLR_RLPML_MASK;
5270 size += 4;
5271 reg &= ~E1000_VMOLR_RLPML_MASK;
5272 reg |= size;
5273 wr32(E1000_VMOLR(vf), reg);
5274 }
ae641bdc 5275
51466239 5276 adapter->vf_data[vf].vlans_enabled++;
4ae196df
AD
5277 }
5278 } else {
5279 if (i < E1000_VLVF_ARRAY_SIZE) {
5280 /* remove vf from the pool */
5281 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5282 /* if pool is empty then remove entry from vfta */
5283 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5284 reg = 0;
5285 igb_vfta_set(hw, vid, false);
5286 }
5287 wr32(E1000_VLVF(i), reg);
ae641bdc
AD
5288
5289 /* do not modify RLPML for PF devices */
5290 if (vf >= adapter->vfs_allocated_count)
5291 return 0;
5292
5293 adapter->vf_data[vf].vlans_enabled--;
5294 if (!adapter->vf_data[vf].vlans_enabled) {
5295 u32 size;
5296 reg = rd32(E1000_VMOLR(vf));
5297 size = reg & E1000_VMOLR_RLPML_MASK;
5298 size -= 4;
5299 reg &= ~E1000_VMOLR_RLPML_MASK;
5300 reg |= size;
5301 wr32(E1000_VMOLR(vf), reg);
5302 }
4ae196df
AD
5303 }
5304 }
8151d294
WM
5305 return 0;
5306}
5307
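/* Program the default port VLAN for a VF, or clear it when vid is zero. */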
5308static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5309{
5310 struct e1000_hw *hw = &adapter->hw;
5311
5312 if (vid)
5313 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5314 else
5315 wr32(E1000_VMVIR(vf), 0);
5316}
5317
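/* ndo callback: administratively assign a port VLAN and QoS priority to a
 * VF from the host side.
 */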
5318static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5319 int vf, u16 vlan, u8 qos)
5320{
5321 int err = 0;
5322 struct igb_adapter *adapter = netdev_priv(netdev);
5323
5324 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5325 return -EINVAL;
5326 if (vlan || qos) {
5327 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5328 if (err)
5329 goto out;
5330 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5331 igb_set_vmolr(adapter, vf, !vlan);
5332 adapter->vf_data[vf].pf_vlan = vlan;
5333 adapter->vf_data[vf].pf_qos = qos;
5334 dev_info(&adapter->pdev->dev,
5335 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5336 if (test_bit(__IGB_DOWN, &adapter->state)) {
5337 dev_warn(&adapter->pdev->dev,
5338 "The VF VLAN has been set,"
5339 " but the PF device is not up.\n");
5340 dev_warn(&adapter->pdev->dev,
5341 "Bring the PF device up before"
5342 " attempting to use the VF device.\n");
5343 }
5344 } else {
5345 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5346 false, vf);
5347 igb_set_vmvir(adapter, vlan, vf);
5348 igb_set_vmolr(adapter, vf, true);
5349 adapter->vf_data[vf].pf_vlan = 0;
5350 adapter->vf_data[vf].pf_qos = 0;
5351 }
5352out:
5353 return err;
4ae196df
AD
5354}
5355
5356static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5357{
5358 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5359 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5360
5361 return igb_vlvf_set(adapter, vid, add, vf);
5362}
5363
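/* Reset a VF's software state: clear its flags (keeping only the PF-set-MAC
 * flag), restore default offloads, reset its VLANs and flush its multicast
 * table.
 */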
f2ca0dbe 5364static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4ae196df 5365{
8fa7e0f7
GR
5366 /* clear flags - except flag that indicates PF has set the MAC */
5367 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
f2ca0dbe 5368 adapter->vf_data[vf].last_nack = jiffies;
4ae196df
AD
5369
5370 /* reset offloads to defaults */
8151d294 5371 igb_set_vmolr(adapter, vf, true);
4ae196df
AD
5372
5373 /* reset vlans for device */
5374 igb_clear_vf_vfta(adapter, vf);
8151d294
WM
5375 if (adapter->vf_data[vf].pf_vlan)
5376 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5377 adapter->vf_data[vf].pf_vlan,
5378 adapter->vf_data[vf].pf_qos);
5379 else
5380 igb_clear_vf_vfta(adapter, vf);
4ae196df
AD
5381
5382 /* reset multicast table array for vf */
5383 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5384
5385 /* Flush and reset the mta with the new values */
ff41f8dc 5386 igb_set_rx_mode(adapter->netdev);
4ae196df
AD
5387}
5388
f2ca0dbe
AD
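/* Handle a VF reset event: generate a new MAC address unless one was
 * administratively assigned, then perform the common reset processing.
 */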
5389static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5390{
5391 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5392
5393 /* generate a new mac address as we were hotplug removed/added */
8151d294 5394 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7efd26d0 5395 eth_random_addr(vf_mac);
f2ca0dbe
AD
5396
5397 /* process remaining reset events */
5398 igb_vf_reset(adapter, vf);
5399}
5400
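/* Complete a VF-initiated reset: reprogram the VF MAC address, re-enable
 * its transmit and receive queues, and reply with an ACK carrying the MAC.
 */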
5401static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4ae196df
AD
5402{
5403 struct e1000_hw *hw = &adapter->hw;
5404 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
ff41f8dc 5405 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
4ae196df
AD
5406 u32 reg, msgbuf[3];
5407 u8 *addr = (u8 *)(&msgbuf[1]);
5408
5409 /* process all the same items cleared in a function level reset */
f2ca0dbe 5410 igb_vf_reset(adapter, vf);
4ae196df
AD
5411
5412 /* set vf mac address */
26ad9178 5413 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4ae196df
AD
5414
5415 /* enable transmit and receive for vf */
5416 reg = rd32(E1000_VFTE);
5417 wr32(E1000_VFTE, reg | (1 << vf));
5418 reg = rd32(E1000_VFRE);
5419 wr32(E1000_VFRE, reg | (1 << vf));
5420
8fa7e0f7 5421 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
4ae196df
AD
5422
5423 /* reply to reset with ack and vf mac address */
5424 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5425 memcpy(addr, vf_mac, 6);
5426 igb_write_mbx(hw, msgbuf, 3, vf);
5427}
5428
5429static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5430{
de42edde
GR
5431 /*
5432 * The VF MAC Address is stored in a packed array of bytes
5433 * starting at the second 32 bit word of the msg array
5434 */
f2ca0dbe
AD
5435 unsigned char *addr = (char *)&msg[1];
5436 int err = -1;
4ae196df 5437
f2ca0dbe
AD
5438 if (is_valid_ether_addr(addr))
5439 err = igb_set_vf_mac(adapter, vf, addr);
4ae196df 5440
f2ca0dbe 5441 return err;
4ae196df
AD
5442}
5443
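/* NACK a VF that acks us while it is not yet clear-to-send, rate limited
 * to once every two seconds per VF.
 */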
5444static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5445{
5446 struct e1000_hw *hw = &adapter->hw;
f2ca0dbe 5447 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4ae196df
AD
5448 u32 msg = E1000_VT_MSGTYPE_NACK;
5449
5450 /* if device isn't clear to send it shouldn't be reading either */
f2ca0dbe
AD
5451 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5452 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4ae196df 5453 igb_write_mbx(hw, &msg, 1, vf);
f2ca0dbe 5454 vf_data->last_nack = jiffies;
4ae196df
AD
5455 }
5456}
5457
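/* Read one mailbox message from a VF, dispatch it to the appropriate
 * handler and report the result back as an ACK or NACK.
 */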
f2ca0dbe 5458static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4ae196df 5459{
f2ca0dbe
AD
5460 struct pci_dev *pdev = adapter->pdev;
5461 u32 msgbuf[E1000_VFMAILBOX_SIZE];
4ae196df 5462 struct e1000_hw *hw = &adapter->hw;
f2ca0dbe 5463 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4ae196df
AD
5464 s32 retval;
5465
f2ca0dbe 5466 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4ae196df 5467
fef45f4c
AD
5468 if (retval) {
5469 /* if receive failed revoke VF CTS stats and restart init */
f2ca0dbe 5470 dev_err(&pdev->dev, "Error receiving message from VF\n");
fef45f4c
AD
5471 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5472 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5473 return;
5474 goto out;
5475 }
4ae196df
AD
5476
5477 /* this is a message we already processed, do nothing */
5478 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
f2ca0dbe 5479 return;
4ae196df
AD
5480
5481 /*
5482 * until the vf completes a reset it should not be
5483 * allowed to start any configuration.
5484 */
5485
5486 if (msgbuf[0] == E1000_VF_RESET) {
5487 igb_vf_reset_msg(adapter, vf);
f2ca0dbe 5488 return;
4ae196df
AD
5489 }
5490
f2ca0dbe 5491 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
fef45f4c
AD
5492 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5493 return;
5494 retval = -1;
5495 goto out;
4ae196df
AD
5496 }
5497
5498 switch ((msgbuf[0] & 0xFFFF)) {
5499 case E1000_VF_SET_MAC_ADDR:
a6b5ea35
GR
5500 retval = -EINVAL;
5501 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5502 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5503 else
5504 dev_warn(&pdev->dev,
5505 "VF %d attempted to override administratively "
5506 "set MAC address\nReload the VF driver to "
5507 "resume operations\n", vf);
4ae196df 5508 break;
7d5753f0
AD
5509 case E1000_VF_SET_PROMISC:
5510 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5511 break;
4ae196df
AD
5512 case E1000_VF_SET_MULTICAST:
5513 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5514 break;
5515 case E1000_VF_SET_LPE:
5516 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5517 break;
5518 case E1000_VF_SET_VLAN:
a6b5ea35
GR
5519 retval = -1;
5520 if (vf_data->pf_vlan)
5521 dev_warn(&pdev->dev,
5522 "VF %d attempted to override administratively "
5523 "set VLAN tag\nReload the VF driver to "
5524 "resume operations\n", vf);
8151d294
WM
5525 else
5526 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4ae196df
AD
5527 break;
5528 default:
090b1795 5529 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4ae196df
AD
5530 retval = -1;
5531 break;
5532 }
5533
fef45f4c
AD
5534 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5535out:
4ae196df
AD
5536 /* notify the VF of the results of what it sent us */
5537 if (retval)
5538 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5539 else
5540 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5541
4ae196df 5542 igb_write_mbx(hw, msgbuf, 1, vf);
f2ca0dbe 5543}
4ae196df 5544
f2ca0dbe
AD
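/* Poll the mailbox of every allocated VF and service any pending resets,
 * messages and acks.
 */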
5545static void igb_msg_task(struct igb_adapter *adapter)
5546{
5547 struct e1000_hw *hw = &adapter->hw;
5548 u32 vf;
5549
5550 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5551 /* process any reset requests */
5552 if (!igb_check_for_rst(hw, vf))
5553 igb_vf_reset_event(adapter, vf);
5554
5555 /* process any messages pending */
5556 if (!igb_check_for_msg(hw, vf))
5557 igb_rcv_msg_from_vf(adapter, vf);
5558
5559 /* process any acks */
5560 if (!igb_check_for_ack(hw, vf))
5561 igb_rcv_ack_from_vf(adapter, vf);
5562 }
4ae196df
AD
5563}
5564
68d480c4
AD
5565/**
5566 * igb_set_uta - Set unicast filter table address
5567 * @adapter: board private structure
5568 *
5569 * The unicast table address is a register array of 32-bit registers.
5570 * The table is meant to be used in a way similar to how the MTA is used
5571 * however due to certain limitations in the hardware it is necessary to
25985edc
LDM
5572 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5573 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
68d480c4
AD
5574 **/
5575static void igb_set_uta(struct igb_adapter *adapter)
5576{
5577 struct e1000_hw *hw = &adapter->hw;
5578 int i;
5579
5580 /* The UTA table only exists on 82576 hardware and newer */
5581 if (hw->mac.type < e1000_82576)
5582 return;
5583
5584 /* we only need to do this if VMDq is enabled */
5585 if (!adapter->vfs_allocated_count)
5586 return;
5587
5588 for (i = 0; i < hw->mac.uta_reg_count; i++)
5589 array_wr32(E1000_UTA, i, ~0);
5590}
5591
9d5c8243
AK
5592/**
5593 * igb_intr_msi - Interrupt Handler
5594 * @irq: interrupt number
5595 * @data: pointer to a network interface device structure
5596 **/
5597static irqreturn_t igb_intr_msi(int irq, void *data)
5598{
047e0030
AD
5599 struct igb_adapter *adapter = data;
5600 struct igb_q_vector *q_vector = adapter->q_vector[0];
9d5c8243
AK
5601 struct e1000_hw *hw = &adapter->hw;
5602 /* read ICR disables interrupts using IAM */
5603 u32 icr = rd32(E1000_ICR);
5604
047e0030 5605 igb_write_itr(q_vector);
9d5c8243 5606
7f081d40
AD
5607 if (icr & E1000_ICR_DRSTA)
5608 schedule_work(&adapter->reset_task);
5609
047e0030 5610 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
5611 /* HW is reporting DMA is out of sync */
5612 adapter->stats.doosync++;
5613 }
5614
9d5c8243
AK
5615 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5616 hw->mac.get_link_status = 1;
5617 if (!test_bit(__IGB_DOWN, &adapter->state))
5618 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5619 }
5620
1f6e8178
MV
5621#ifdef CONFIG_IGB_PTP
5622 if (icr & E1000_ICR_TS) {
5623 u32 tsicr = rd32(E1000_TSICR);
5624
5625 if (tsicr & E1000_TSICR_TXTS) {
5626 /* acknowledge the interrupt */
5627 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5628 /* retrieve hardware timestamp */
5629 schedule_work(&adapter->ptp_tx_work);
5630 }
5631 }
5632#endif /* CONFIG_IGB_PTP */
5633
047e0030 5634 napi_schedule(&q_vector->napi);
9d5c8243
AK
5635
5636 return IRQ_HANDLED;
5637}
5638
5639/**
4a3c6433 5640 * igb_intr - Legacy Interrupt Handler
9d5c8243
AK
5641 * @irq: interrupt number
5642 * @data: pointer to a network interface device structure
5643 **/
5644static irqreturn_t igb_intr(int irq, void *data)
5645{
047e0030
AD
5646 struct igb_adapter *adapter = data;
5647 struct igb_q_vector *q_vector = adapter->q_vector[0];
9d5c8243
AK
5648 struct e1000_hw *hw = &adapter->hw;
5649 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5650 * need for the IMC write */
5651 u32 icr = rd32(E1000_ICR);
9d5c8243
AK
5652
5653 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5654 * not set, then the adapter didn't send an interrupt */
5655 if (!(icr & E1000_ICR_INT_ASSERTED))
5656 return IRQ_NONE;
5657
0ba82994
AD
5658 igb_write_itr(q_vector);
5659
7f081d40
AD
5660 if (icr & E1000_ICR_DRSTA)
5661 schedule_work(&adapter->reset_task);
5662
047e0030 5663 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
5664 /* HW is reporting DMA is out of sync */
5665 adapter->stats.doosync++;
5666 }
5667
9d5c8243
AK
5668 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5669 hw->mac.get_link_status = 1;
5670 /* guard against interrupt when we're going down */
5671 if (!test_bit(__IGB_DOWN, &adapter->state))
5672 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5673 }
5674
1f6e8178
MV
5675#ifdef CONFIG_IGB_PTP
5676 if (icr & E1000_ICR_TS) {
5677 u32 tsicr = rd32(E1000_TSICR);
5678
5679 if (tsicr & E1000_TSICR_TXTS) {
5680 /* acknowledge the interrupt */
5681 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5682 /* retrieve hardware timestamp */
5683 schedule_work(&adapter->ptp_tx_work);
5684 }
5685 }
5686#endif /* CONFIG_IGB_PTP */
5687
047e0030 5688 napi_schedule(&q_vector->napi);
9d5c8243
AK
5689
5690 return IRQ_HANDLED;
5691}
5692
c50b52a0 5693static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
9d5c8243 5694{
047e0030 5695 struct igb_adapter *adapter = q_vector->adapter;
46544258 5696 struct e1000_hw *hw = &adapter->hw;
9d5c8243 5697
0ba82994
AD
5698 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5699 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5700 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5701 igb_set_itr(q_vector);
46544258 5702 else
047e0030 5703 igb_update_ring_itr(q_vector);
9d5c8243
AK
5704 }
5705
46544258
AD
5706 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5707 if (adapter->msix_entries)
047e0030 5708 wr32(E1000_EIMS, q_vector->eims_value);
46544258
AD
5709 else
5710 igb_irq_enable(adapter);
5711 }
9d5c8243
AK
5712}
5713
46544258
AD
5714/**
5715 * igb_poll - NAPI Rx polling callback
5716 * @napi: napi polling structure
5717 * @budget: count of how many packets we should handle
5718 **/
5719static int igb_poll(struct napi_struct *napi, int budget)
9d5c8243 5720{
047e0030
AD
5721 struct igb_q_vector *q_vector = container_of(napi,
5722 struct igb_q_vector,
5723 napi);
16eb8815 5724 bool clean_complete = true;
9d5c8243 5725
421e02f0 5726#ifdef CONFIG_IGB_DCA
047e0030
AD
5727 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5728 igb_update_dca(q_vector);
fe4506b6 5729#endif
0ba82994 5730 if (q_vector->tx.ring)
13fde97a 5731 clean_complete = igb_clean_tx_irq(q_vector);
9d5c8243 5732
0ba82994 5733 if (q_vector->rx.ring)
cd392f5c 5734 clean_complete &= igb_clean_rx_irq(q_vector, budget);
047e0030 5735
16eb8815
AD
5736 /* If all work not completed, return budget and keep polling */
5737 if (!clean_complete)
5738 return budget;
46544258 5739
9d5c8243 5740 /* If not enough Rx work done, exit the polling mode */
16eb8815
AD
5741 napi_complete(napi);
5742 igb_ring_irq_enable(q_vector);
9d5c8243 5743
16eb8815 5744 return 0;
9d5c8243 5745}
6d8126f9 5746
9d5c8243
AK
5747/**
5748 * igb_clean_tx_irq - Reclaim resources after transmit completes
047e0030 5749 * @q_vector: pointer to q_vector containing needed info
49ce9c2c 5750 *
9d5c8243
AK
5751 * returns true if ring is completely cleaned
5752 **/
047e0030 5753static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
9d5c8243 5754{
047e0030 5755 struct igb_adapter *adapter = q_vector->adapter;
0ba82994 5756 struct igb_ring *tx_ring = q_vector->tx.ring;
06034649 5757 struct igb_tx_buffer *tx_buffer;
8542db05 5758 union e1000_adv_tx_desc *tx_desc, *eop_desc;
9d5c8243 5759 unsigned int total_bytes = 0, total_packets = 0;
0ba82994 5760 unsigned int budget = q_vector->tx.work_limit;
8542db05 5761 unsigned int i = tx_ring->next_to_clean;
9d5c8243 5762
13fde97a
AD
5763 if (test_bit(__IGB_DOWN, &adapter->state))
5764 return true;
0e014cb1 5765
06034649 5766 tx_buffer = &tx_ring->tx_buffer_info[i];
13fde97a 5767 tx_desc = IGB_TX_DESC(tx_ring, i);
8542db05 5768 i -= tx_ring->count;
9d5c8243 5769
13fde97a 5770 for (; budget; budget--) {
8542db05 5771 eop_desc = tx_buffer->next_to_watch;
13fde97a 5772
8542db05
AD
5773 /* prevent any other reads prior to eop_desc */
5774 rmb();
5775
5776 /* if next_to_watch is not set then there is no work pending */
5777 if (!eop_desc)
5778 break;
13fde97a
AD
5779
5780 /* if DD is not set pending work has not been completed */
5781 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5782 break;
5783
8542db05
AD
5784 /* clear next_to_watch to prevent false hangs */
5785 tx_buffer->next_to_watch = NULL;
9d5c8243 5786
ebe42d16
AD
5787 /* update the statistics for this packet */
5788 total_bytes += tx_buffer->bytecount;
5789 total_packets += tx_buffer->gso_segs;
13fde97a 5790
ebe42d16
AD
5791 /* free the skb */
5792 dev_kfree_skb_any(tx_buffer->skb);
5793 tx_buffer->skb = NULL;
13fde97a 5794
ebe42d16
AD
5795 /* unmap skb header data */
5796 dma_unmap_single(tx_ring->dev,
5797 tx_buffer->dma,
5798 tx_buffer->length,
5799 DMA_TO_DEVICE);
5800
5801 /* clear last DMA location and unmap remaining buffers */
5802 while (tx_desc != eop_desc) {
5803 tx_buffer->dma = 0;
9d5c8243 5804
13fde97a
AD
5805 tx_buffer++;
5806 tx_desc++;
9d5c8243 5807 i++;
8542db05
AD
5808 if (unlikely(!i)) {
5809 i -= tx_ring->count;
06034649 5810 tx_buffer = tx_ring->tx_buffer_info;
13fde97a
AD
5811 tx_desc = IGB_TX_DESC(tx_ring, 0);
5812 }
ebe42d16
AD
5813
5814 /* unmap any remaining paged data */
5815 if (tx_buffer->dma) {
5816 dma_unmap_page(tx_ring->dev,
5817 tx_buffer->dma,
5818 tx_buffer->length,
5819 DMA_TO_DEVICE);
5820 }
5821 }
5822
5823 /* clear last DMA location */
5824 tx_buffer->dma = 0;
5825
5826 /* move us one more past the eop_desc for start of next pkt */
5827 tx_buffer++;
5828 tx_desc++;
5829 i++;
5830 if (unlikely(!i)) {
5831 i -= tx_ring->count;
5832 tx_buffer = tx_ring->tx_buffer_info;
5833 tx_desc = IGB_TX_DESC(tx_ring, 0);
5834 }
0e014cb1
AD
5835 }
5836
bdbc0631
ED
5837 netdev_tx_completed_queue(txring_txq(tx_ring),
5838 total_packets, total_bytes);
8542db05 5839 i += tx_ring->count;
9d5c8243 5840 tx_ring->next_to_clean = i;
13fde97a
AD
5841 u64_stats_update_begin(&tx_ring->tx_syncp);
5842 tx_ring->tx_stats.bytes += total_bytes;
5843 tx_ring->tx_stats.packets += total_packets;
5844 u64_stats_update_end(&tx_ring->tx_syncp);
0ba82994
AD
5845 q_vector->tx.total_bytes += total_bytes;
5846 q_vector->tx.total_packets += total_packets;
9d5c8243 5847
6d095fa8 5848 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
13fde97a 5849 struct e1000_hw *hw = &adapter->hw;
12dcd86b 5850
8542db05 5851 eop_desc = tx_buffer->next_to_watch;
9d5c8243 5852
9d5c8243
AK
 5853 /* Detect a transmit hang in hardware; this serializes the
 5854 * check with the clearing of time_stamp and movement of i */
6d095fa8 5855 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
8542db05
AD
5856 if (eop_desc &&
5857 time_after(jiffies, tx_buffer->time_stamp +
8e95a202
JP
5858 (adapter->tx_timeout_factor * HZ)) &&
5859 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
9d5c8243 5860
9d5c8243 5861 /* detected Tx unit hang */
59d71989 5862 dev_err(tx_ring->dev,
9d5c8243 5863 "Detected Tx Unit Hang\n"
2d064c06 5864 " Tx Queue <%d>\n"
9d5c8243
AK
5865 " TDH <%x>\n"
5866 " TDT <%x>\n"
5867 " next_to_use <%x>\n"
5868 " next_to_clean <%x>\n"
9d5c8243
AK
5869 "buffer_info[next_to_clean]\n"
5870 " time_stamp <%lx>\n"
8542db05 5871 " next_to_watch <%p>\n"
9d5c8243
AK
5872 " jiffies <%lx>\n"
5873 " desc.status <%x>\n",
2d064c06 5874 tx_ring->queue_index,
238ac817 5875 rd32(E1000_TDH(tx_ring->reg_idx)),
fce99e34 5876 readl(tx_ring->tail),
9d5c8243
AK
5877 tx_ring->next_to_use,
5878 tx_ring->next_to_clean,
8542db05
AD
5879 tx_buffer->time_stamp,
5880 eop_desc,
9d5c8243 5881 jiffies,
0e014cb1 5882 eop_desc->wb.status);
13fde97a
AD
5883 netif_stop_subqueue(tx_ring->netdev,
5884 tx_ring->queue_index);
5885
5886 /* we are about to reset, no point in enabling stuff */
5887 return true;
9d5c8243
AK
5888 }
5889 }
13fde97a
AD
5890
5891 if (unlikely(total_packets &&
5892 netif_carrier_ok(tx_ring->netdev) &&
5893 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5894 /* Make sure that anybody stopping the queue after this
5895 * sees the new next_to_clean.
5896 */
5897 smp_mb();
5898 if (__netif_subqueue_stopped(tx_ring->netdev,
5899 tx_ring->queue_index) &&
5900 !(test_bit(__IGB_DOWN, &adapter->state))) {
5901 netif_wake_subqueue(tx_ring->netdev,
5902 tx_ring->queue_index);
5903
5904 u64_stats_update_begin(&tx_ring->tx_syncp);
5905 tx_ring->tx_stats.restart_queue++;
5906 u64_stats_update_end(&tx_ring->tx_syncp);
5907 }
5908 }
5909
5910 return !!budget;
9d5c8243
AK
5911}
5912
cd392f5c 5913static inline void igb_rx_checksum(struct igb_ring *ring,
3ceb90fd
AD
5914 union e1000_adv_rx_desc *rx_desc,
5915 struct sk_buff *skb)
9d5c8243 5916{
bc8acf2c 5917 skb_checksum_none_assert(skb);
9d5c8243 5918
294e7d78 5919 /* Ignore Checksum bit is set */
3ceb90fd 5920 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
294e7d78
AD
5921 return;
5922
5923 /* Rx checksum disabled via ethtool */
5924 if (!(ring->netdev->features & NETIF_F_RXCSUM))
9d5c8243 5925 return;
85ad76b2 5926
9d5c8243 5927 /* TCP/UDP checksum error bit is set */
3ceb90fd
AD
5928 if (igb_test_staterr(rx_desc,
5929 E1000_RXDEXT_STATERR_TCPE |
5930 E1000_RXDEXT_STATERR_IPE)) {
b9473560
JB
5931 /*
5932 * work around errata with sctp packets where the TCPE aka
5933 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5934 * packets, (aka let the stack check the crc32c)
5935 */
866cff06
AD
5936 if (!((skb->len == 60) &&
5937 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
12dcd86b 5938 u64_stats_update_begin(&ring->rx_syncp);
04a5fcaa 5939 ring->rx_stats.csum_err++;
12dcd86b
ED
5940 u64_stats_update_end(&ring->rx_syncp);
5941 }
9d5c8243 5942 /* let the stack verify checksum errors */
9d5c8243
AK
5943 return;
5944 }
5945 /* It must be a TCP or UDP packet with a valid checksum */
3ceb90fd
AD
5946 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
5947 E1000_RXD_STAT_UDPCS))
9d5c8243
AK
5948 skb->ip_summed = CHECKSUM_UNNECESSARY;
5949
3ceb90fd
AD
5950 dev_dbg(ring->dev, "cksum success: bits %08X\n",
5951 le32_to_cpu(rx_desc->wb.upper.status_error));
9d5c8243
AK
5952}
5953
077887c3
AD
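/* Copy the hardware RSS hash from the Rx descriptor into the skb when
 * receive hashing is enabled.
 */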
5954static inline void igb_rx_hash(struct igb_ring *ring,
5955 union e1000_adv_rx_desc *rx_desc,
5956 struct sk_buff *skb)
5957{
5958 if (ring->netdev->features & NETIF_F_RXHASH)
5959 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5960}
5961
8be10e91
AD
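/* Extract the VLAN tag from the Rx descriptor, byte-swapping it for
 * loopback packets on adapters that require it, and attach it to the skb.
 */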
5962static void igb_rx_vlan(struct igb_ring *ring,
5963 union e1000_adv_rx_desc *rx_desc,
5964 struct sk_buff *skb)
5965{
5966 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
5967 u16 vid;
5968 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
5969 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
5970 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
5971 else
5972 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
5973
5974 __vlan_hwaccel_put_tag(skb, vid);
5975 }
5976}
5977
44390ca6 5978static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
2d94d8ab
AD
5979{
5980 /* HW will not DMA in data larger than the given buffer, even if it
5981 * parses the (NFS, of course) header to be larger. In that case, it
5982 * fills the header buffer and spills the rest into the page.
5983 */
5984 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5985 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
44390ca6
AD
5986 if (hlen > IGB_RX_HDR_LEN)
5987 hlen = IGB_RX_HDR_LEN;
2d94d8ab
AD
5988 return hlen;
5989}
5990
cd392f5c 5991static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
9d5c8243 5992{
0ba82994 5993 struct igb_ring *rx_ring = q_vector->rx.ring;
16eb8815
AD
5994 union e1000_adv_rx_desc *rx_desc;
5995 const int current_node = numa_node_id();
9d5c8243 5996 unsigned int total_bytes = 0, total_packets = 0;
16eb8815
AD
5997 u16 cleaned_count = igb_desc_unused(rx_ring);
5998 u16 i = rx_ring->next_to_clean;
9d5c8243 5999
60136906 6000 rx_desc = IGB_RX_DESC(rx_ring, i);
9d5c8243 6001
3ceb90fd 6002 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
06034649 6003 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
16eb8815
AD
6004 struct sk_buff *skb = buffer_info->skb;
6005 union e1000_adv_rx_desc *next_rxd;
9d5c8243 6006
69d3ca53 6007 buffer_info->skb = NULL;
16eb8815 6008 prefetch(skb->data);
69d3ca53
AD
6009
6010 i++;
6011 if (i == rx_ring->count)
6012 i = 0;
42d0781a 6013
60136906 6014 next_rxd = IGB_RX_DESC(rx_ring, i);
69d3ca53 6015 prefetch(next_rxd);
9d5c8243 6016
16eb8815
AD
6017 /*
6018 * This memory barrier is needed to keep us from reading
6019 * any other fields out of the rx_desc until we know the
6020 * RXD_STAT_DD bit is set
6021 */
6022 rmb();
9d5c8243 6023
16eb8815
AD
6024 if (!skb_is_nonlinear(skb)) {
6025 __skb_put(skb, igb_get_hlen(rx_desc));
6026 dma_unmap_single(rx_ring->dev, buffer_info->dma,
44390ca6 6027 IGB_RX_HDR_LEN,
59d71989 6028 DMA_FROM_DEVICE);
91615f76 6029 buffer_info->dma = 0;
bf36c1a0
AD
6030 }
6031
16eb8815
AD
6032 if (rx_desc->wb.upper.length) {
6033 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
bf36c1a0 6034
aa913403 6035 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
bf36c1a0
AD
6036 buffer_info->page,
6037 buffer_info->page_offset,
6038 length);
6039
16eb8815
AD
6040 skb->len += length;
6041 skb->data_len += length;
95b9c1df 6042 skb->truesize += PAGE_SIZE / 2;
16eb8815 6043
d1eff350
AD
6044 if ((page_count(buffer_info->page) != 1) ||
6045 (page_to_nid(buffer_info->page) != current_node))
bf36c1a0
AD
6046 buffer_info->page = NULL;
6047 else
6048 get_page(buffer_info->page);
9d5c8243 6049
16eb8815
AD
6050 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
6051 PAGE_SIZE / 2, DMA_FROM_DEVICE);
6052 buffer_info->page_dma = 0;
9d5c8243 6053 }
9d5c8243 6054
3ceb90fd 6055 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
06034649
AD
6056 struct igb_rx_buffer *next_buffer;
6057 next_buffer = &rx_ring->rx_buffer_info[i];
b2d56536
AD
6058 buffer_info->skb = next_buffer->skb;
6059 buffer_info->dma = next_buffer->dma;
6060 next_buffer->skb = skb;
6061 next_buffer->dma = 0;
bf36c1a0
AD
6062 goto next_desc;
6063 }
44390ca6 6064
89eaefb6
BG
6065 if (unlikely((igb_test_staterr(rx_desc,
6066 E1000_RXDEXT_ERR_FRAME_ERR_MASK))
6067 && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
16eb8815 6068 dev_kfree_skb_any(skb);
9d5c8243
AK
6069 goto next_desc;
6070 }
9d5c8243 6071
7ebae817 6072#ifdef CONFIG_IGB_PTP
a79f4f88 6073 igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
3c89f6d0 6074#endif /* CONFIG_IGB_PTP */
077887c3 6075 igb_rx_hash(rx_ring, rx_desc, skb);
3ceb90fd 6076 igb_rx_checksum(rx_ring, rx_desc, skb);
8be10e91 6077 igb_rx_vlan(rx_ring, rx_desc, skb);
3ceb90fd
AD
6078
6079 total_bytes += skb->len;
6080 total_packets++;
6081
6082 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6083
b2cb09b1 6084 napi_gro_receive(&q_vector->napi, skb);
9d5c8243 6085
16eb8815 6086 budget--;
9d5c8243 6087next_desc:
16eb8815
AD
6088 if (!budget)
6089 break;
6090
6091 cleaned_count++;
9d5c8243
AK
6092 /* return some buffers to hardware, one at a time is too slow */
6093 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
cd392f5c 6094 igb_alloc_rx_buffers(rx_ring, cleaned_count);
9d5c8243
AK
6095 cleaned_count = 0;
6096 }
6097
6098 /* use prefetched values */
6099 rx_desc = next_rxd;
9d5c8243 6100 }
bf36c1a0 6101
9d5c8243 6102 rx_ring->next_to_clean = i;
12dcd86b 6103 u64_stats_update_begin(&rx_ring->rx_syncp);
9d5c8243
AK
6104 rx_ring->rx_stats.packets += total_packets;
6105 rx_ring->rx_stats.bytes += total_bytes;
12dcd86b 6106 u64_stats_update_end(&rx_ring->rx_syncp);
0ba82994
AD
6107 q_vector->rx.total_packets += total_packets;
6108 q_vector->rx.total_bytes += total_bytes;
c023cd88
AD
6109
6110 if (cleaned_count)
cd392f5c 6111 igb_alloc_rx_buffers(rx_ring, cleaned_count);
c023cd88 6112
16eb8815 6113 return !!budget;
9d5c8243
AK
6114}
6115
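/* Make sure the Rx buffer has an skb allocated and its header area DMA
 * mapped; on failure the allocation error counter is bumped and false is
 * returned.
 */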
c023cd88 6116static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
06034649 6117 struct igb_rx_buffer *bi)
c023cd88
AD
6118{
6119 struct sk_buff *skb = bi->skb;
6120 dma_addr_t dma = bi->dma;
6121
6122 if (dma)
6123 return true;
6124
6125 if (likely(!skb)) {
6126 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6127 IGB_RX_HDR_LEN);
6128 bi->skb = skb;
6129 if (!skb) {
6130 rx_ring->rx_stats.alloc_failed++;
6131 return false;
6132 }
6133
6134 /* initialize skb for ring */
6135 skb_record_rx_queue(skb, rx_ring->queue_index);
6136 }
6137
6138 dma = dma_map_single(rx_ring->dev, skb->data,
6139 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6140
6141 if (dma_mapping_error(rx_ring->dev, dma)) {
6142 rx_ring->rx_stats.alloc_failed++;
6143 return false;
6144 }
6145
6146 bi->dma = dma;
6147 return true;
6148}
6149
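/* Make sure the Rx buffer has a half-page data buffer allocated and DMA
 * mapped for the packet payload.
 */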
6150static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
06034649 6151 struct igb_rx_buffer *bi)
c023cd88
AD
6152{
6153 struct page *page = bi->page;
6154 dma_addr_t page_dma = bi->page_dma;
6155 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6156
6157 if (page_dma)
6158 return true;
6159
6160 if (!page) {
0614002b 6161 page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
c023cd88
AD
6162 bi->page = page;
6163 if (unlikely(!page)) {
6164 rx_ring->rx_stats.alloc_failed++;
6165 return false;
6166 }
6167 }
6168
6169 page_dma = dma_map_page(rx_ring->dev, page,
6170 page_offset, PAGE_SIZE / 2,
6171 DMA_FROM_DEVICE);
6172
6173 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6174 rx_ring->rx_stats.alloc_failed++;
6175 return false;
6176 }
6177
6178 bi->page_dma = page_dma;
6179 bi->page_offset = page_offset;
6180 return true;
6181}
6182
9d5c8243 6183/**
cd392f5c 6184 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
9d5c8243
AK
 6185 * @rx_ring: rx descriptor ring to place new buffers on
 * @cleaned_count: number of buffers to replace
 6186 **/
cd392f5c 6187void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
9d5c8243 6188{
9d5c8243 6189 union e1000_adv_rx_desc *rx_desc;
06034649 6190 struct igb_rx_buffer *bi;
c023cd88 6191 u16 i = rx_ring->next_to_use;
9d5c8243 6192
60136906 6193 rx_desc = IGB_RX_DESC(rx_ring, i);
06034649 6194 bi = &rx_ring->rx_buffer_info[i];
c023cd88 6195 i -= rx_ring->count;
9d5c8243
AK
6196
6197 while (cleaned_count--) {
c023cd88
AD
6198 if (!igb_alloc_mapped_skb(rx_ring, bi))
6199 break;
9d5c8243 6200
c023cd88
AD
6201 /* Refresh the desc even if buffer_addrs didn't change
6202 * because each write-back erases this info. */
6203 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
9d5c8243 6204
c023cd88
AD
6205 if (!igb_alloc_mapped_page(rx_ring, bi))
6206 break;
6207
6208 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
9d5c8243 6209
c023cd88
AD
6210 rx_desc++;
6211 bi++;
9d5c8243 6212 i++;
c023cd88 6213 if (unlikely(!i)) {
60136906 6214 rx_desc = IGB_RX_DESC(rx_ring, 0);
06034649 6215 bi = rx_ring->rx_buffer_info;
c023cd88
AD
6216 i -= rx_ring->count;
6217 }
6218
6219 /* clear the hdr_addr for the next_to_use descriptor */
6220 rx_desc->read.hdr_addr = 0;
9d5c8243
AK
6221 }
6222
c023cd88
AD
6223 i += rx_ring->count;
6224
9d5c8243
AK
6225 if (rx_ring->next_to_use != i) {
6226 rx_ring->next_to_use = i;
9d5c8243
AK
6227
6228 /* Force memory writes to complete before letting h/w
6229 * know there are new descriptors to fetch. (Only
6230 * applicable for weak-ordered memory model archs,
6231 * such as IA-64). */
6232 wmb();
fce99e34 6233 writel(i, rx_ring->tail);
9d5c8243
AK
6234 }
6235}
6236
6237/**
 6238 * igb_mii_ioctl - handle MII ioctl requests
 6239 * @netdev: network interface device structure
 6240 * @ifr: interface request containing the MII register data
 6241 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
6242 **/
6243static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6244{
6245 struct igb_adapter *adapter = netdev_priv(netdev);
6246 struct mii_ioctl_data *data = if_mii(ifr);
6247
6248 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6249 return -EOPNOTSUPP;
6250
6251 switch (cmd) {
6252 case SIOCGMIIPHY:
6253 data->phy_id = adapter->hw.phy.addr;
6254 break;
6255 case SIOCGMIIREG:
f5f4cf08
AD
6256 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6257 &data->val_out))
9d5c8243
AK
6258 return -EIO;
6259 break;
6260 case SIOCSMIIREG:
6261 default:
6262 return -EOPNOTSUPP;
6263 }
6264 return 0;
6265}
6266
6267/**
 6268 * igb_ioctl - dispatch device-specific ioctl requests
 6269 * @netdev: network interface device structure
 6270 * @ifr: interface request data
 6271 * @cmd: ioctl command
6272 **/
6273static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6274{
6275 switch (cmd) {
6276 case SIOCGMIIPHY:
6277 case SIOCGMIIREG:
6278 case SIOCSMIIREG:
6279 return igb_mii_ioctl(netdev, ifr, cmd);
3c89f6d0 6280#ifdef CONFIG_IGB_PTP
c6cb090b 6281 case SIOCSHWTSTAMP:
a79f4f88 6282 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
3c89f6d0 6283#endif /* CONFIG_IGB_PTP */
9d5c8243
AK
6284 default:
6285 return -EOPNOTSUPP;
6286 }
6287}
6288
009bc06e
AD
6289s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6290{
6291 struct igb_adapter *adapter = hw->back;
6292 u16 cap_offset;
6293
bdaae04c 6294 cap_offset = adapter->pdev->pcie_cap;
009bc06e
AD
6295 if (!cap_offset)
6296 return -E1000_ERR_CONFIG;
6297
6298 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6299
6300 return 0;
6301}
6302
6303s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6304{
6305 struct igb_adapter *adapter = hw->back;
6306 u16 cap_offset;
6307
bdaae04c 6308 cap_offset = adapter->pdev->pcie_cap;
009bc06e
AD
6309 if (!cap_offset)
6310 return -E1000_ERR_CONFIG;
6311
6312 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6313
6314 return 0;
6315}
6316
c8f44aff 6317static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
9d5c8243
AK
6318{
6319 struct igb_adapter *adapter = netdev_priv(netdev);
6320 struct e1000_hw *hw = &adapter->hw;
6321 u32 ctrl, rctl;
5faf030c 6322 bool enable = !!(features & NETIF_F_HW_VLAN_RX);
9d5c8243 6323
5faf030c 6324 if (enable) {
9d5c8243
AK
6325 /* enable VLAN tag insert/strip */
6326 ctrl = rd32(E1000_CTRL);
6327 ctrl |= E1000_CTRL_VME;
6328 wr32(E1000_CTRL, ctrl);
6329
51466239 6330 /* Disable CFI check */
9d5c8243 6331 rctl = rd32(E1000_RCTL);
9d5c8243
AK
6332 rctl &= ~E1000_RCTL_CFIEN;
6333 wr32(E1000_RCTL, rctl);
9d5c8243
AK
6334 } else {
6335 /* disable VLAN tag insert/strip */
6336 ctrl = rd32(E1000_CTRL);
6337 ctrl &= ~E1000_CTRL_VME;
6338 wr32(E1000_CTRL, ctrl);
9d5c8243
AK
6339 }
6340
e1739522 6341 igb_rlpml_set(adapter);
9d5c8243
AK
6342}
6343
8e586137 6344static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
9d5c8243
AK
6345{
6346 struct igb_adapter *adapter = netdev_priv(netdev);
6347 struct e1000_hw *hw = &adapter->hw;
4ae196df 6348 int pf_id = adapter->vfs_allocated_count;
9d5c8243 6349
51466239
AD
6350 /* attempt to add filter to vlvf array */
6351 igb_vlvf_set(adapter, vid, true, pf_id);
4ae196df 6352
51466239
AD
6353 /* add the filter since PF can receive vlans w/o entry in vlvf */
6354 igb_vfta_set(hw, vid, true);
b2cb09b1
JP
6355
6356 set_bit(vid, adapter->active_vlans);
8e586137
JP
6357
6358 return 0;
9d5c8243
AK
6359}
6360
8e586137 6361static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
9d5c8243
AK
6362{
6363 struct igb_adapter *adapter = netdev_priv(netdev);
6364 struct e1000_hw *hw = &adapter->hw;
4ae196df 6365 int pf_id = adapter->vfs_allocated_count;
51466239 6366 s32 err;
9d5c8243 6367
51466239
AD
6368 /* remove vlan from VLVF table array */
6369 err = igb_vlvf_set(adapter, vid, false, pf_id);
9d5c8243 6370
51466239
AD
6371 /* if vid was not present in VLVF just remove it from table */
6372 if (err)
4ae196df 6373 igb_vfta_set(hw, vid, false);
b2cb09b1
JP
6374
6375 clear_bit(vid, adapter->active_vlans);
8e586137
JP
6376
6377 return 0;
9d5c8243
AK
6378}
6379
6380static void igb_restore_vlan(struct igb_adapter *adapter)
6381{
b2cb09b1 6382 u16 vid;
9d5c8243 6383
5faf030c
AD
6384 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6385
b2cb09b1
JP
6386 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6387 igb_vlan_rx_add_vid(adapter->netdev, vid);
9d5c8243
AK
6388}
6389
14ad2513 6390int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
9d5c8243 6391{
090b1795 6392 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
6393 struct e1000_mac_info *mac = &adapter->hw.mac;
6394
6395 mac->autoneg = 0;
6396
14ad2513
DD
6397 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6398 * for the switch() below to work */
6399 if ((spd & 1) || (dplx & ~1))
6400 goto err_inval;
6401
cd2638a8
CW
 6402 /* Fiber NICs only allow 1000 Mbps full duplex */
6403 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
14ad2513
DD
 6404 (spd != SPEED_1000 ||
 6405 dplx != DUPLEX_FULL))
6406 goto err_inval;
cd2638a8 6407
14ad2513 6408 switch (spd + dplx) {
9d5c8243
AK
6409 case SPEED_10 + DUPLEX_HALF:
6410 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6411 break;
6412 case SPEED_10 + DUPLEX_FULL:
6413 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6414 break;
6415 case SPEED_100 + DUPLEX_HALF:
6416 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6417 break;
6418 case SPEED_100 + DUPLEX_FULL:
6419 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6420 break;
6421 case SPEED_1000 + DUPLEX_FULL:
6422 mac->autoneg = 1;
6423 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6424 break;
6425 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6426 default:
14ad2513 6427 goto err_inval;
9d5c8243 6428 }
8376dad0
JB
6429
6430 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
6431 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6432
9d5c8243 6433 return 0;
14ad2513
DD
6434
6435err_inval:
6436 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6437 return -EINVAL;
9d5c8243
AK
6438}
6439
749ab2cd
YZ
6440static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
6441 bool runtime)
9d5c8243
AK
6442{
6443 struct net_device *netdev = pci_get_drvdata(pdev);
6444 struct igb_adapter *adapter = netdev_priv(netdev);
6445 struct e1000_hw *hw = &adapter->hw;
2d064c06 6446 u32 ctrl, rctl, status;
749ab2cd 6447 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
9d5c8243
AK
6448#ifdef CONFIG_PM
6449 int retval = 0;
6450#endif
6451
6452 netif_device_detach(netdev);
6453
a88f10ec 6454 if (netif_running(netdev))
749ab2cd 6455 __igb_close(netdev, true);
a88f10ec 6456
047e0030 6457 igb_clear_interrupt_scheme(adapter);
9d5c8243
AK
6458
6459#ifdef CONFIG_PM
6460 retval = pci_save_state(pdev);
6461 if (retval)
6462 return retval;
6463#endif
6464
6465 status = rd32(E1000_STATUS);
6466 if (status & E1000_STATUS_LU)
6467 wufc &= ~E1000_WUFC_LNKC;
6468
6469 if (wufc) {
6470 igb_setup_rctl(adapter);
ff41f8dc 6471 igb_set_rx_mode(netdev);
9d5c8243
AK
6472
6473 /* turn on all-multi mode if wake on multicast is enabled */
6474 if (wufc & E1000_WUFC_MC) {
6475 rctl = rd32(E1000_RCTL);
6476 rctl |= E1000_RCTL_MPE;
6477 wr32(E1000_RCTL, rctl);
6478 }
6479
6480 ctrl = rd32(E1000_CTRL);
6481 /* advertise wake from D3Cold */
6482 #define E1000_CTRL_ADVD3WUC 0x00100000
6483 /* phy power management enable */
6484 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6485 ctrl |= E1000_CTRL_ADVD3WUC;
6486 wr32(E1000_CTRL, ctrl);
6487
9d5c8243 6488 /* Allow time for pending master requests to run */
330a6d6a 6489 igb_disable_pcie_master(hw);
9d5c8243
AK
6490
6491 wr32(E1000_WUC, E1000_WUC_PME_EN);
6492 wr32(E1000_WUFC, wufc);
9d5c8243
AK
6493 } else {
6494 wr32(E1000_WUC, 0);
6495 wr32(E1000_WUFC, 0);
9d5c8243
AK
6496 }
6497
3fe7c4c9
RW
6498 *enable_wake = wufc || adapter->en_mng_pt;
6499 if (!*enable_wake)
88a268c1
NN
6500 igb_power_down_link(adapter);
6501 else
6502 igb_power_up_link(adapter);
9d5c8243
AK
6503
6504 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6505 * would have already happened in close and is redundant. */
6506 igb_release_hw_control(adapter);
6507
6508 pci_disable_device(pdev);
6509
9d5c8243
AK
6510 return 0;
6511}
6512
6513#ifdef CONFIG_PM
d9dd966d 6514#ifdef CONFIG_PM_SLEEP
749ab2cd 6515static int igb_suspend(struct device *dev)
3fe7c4c9
RW
6516{
6517 int retval;
6518 bool wake;
749ab2cd 6519 struct pci_dev *pdev = to_pci_dev(dev);
3fe7c4c9 6520
749ab2cd 6521 retval = __igb_shutdown(pdev, &wake, 0);
3fe7c4c9
RW
6522 if (retval)
6523 return retval;
6524
6525 if (wake) {
6526 pci_prepare_to_sleep(pdev);
6527 } else {
6528 pci_wake_from_d3(pdev, false);
6529 pci_set_power_state(pdev, PCI_D3hot);
6530 }
6531
6532 return 0;
6533}
d9dd966d 6534#endif /* CONFIG_PM_SLEEP */
3fe7c4c9 6535
749ab2cd 6536static int igb_resume(struct device *dev)
9d5c8243 6537{
749ab2cd 6538 struct pci_dev *pdev = to_pci_dev(dev);
9d5c8243
AK
6539 struct net_device *netdev = pci_get_drvdata(pdev);
6540 struct igb_adapter *adapter = netdev_priv(netdev);
6541 struct e1000_hw *hw = &adapter->hw;
6542 u32 err;
6543
6544 pci_set_power_state(pdev, PCI_D0);
6545 pci_restore_state(pdev);
b94f2d77 6546 pci_save_state(pdev);
42bfd33a 6547
aed5dec3 6548 err = pci_enable_device_mem(pdev);
9d5c8243
AK
6549 if (err) {
6550 dev_err(&pdev->dev,
6551 "igb: Cannot enable PCI device from suspend\n");
6552 return err;
6553 }
6554 pci_set_master(pdev);
6555
6556 pci_enable_wake(pdev, PCI_D3hot, 0);
6557 pci_enable_wake(pdev, PCI_D3cold, 0);
6558
cfb8c3aa 6559 if (igb_init_interrupt_scheme(adapter)) {
a88f10ec
AD
6560 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6561 return -ENOMEM;
9d5c8243
AK
6562 }
6563
9d5c8243 6564 igb_reset(adapter);
a8564f03
AD
6565
6566 /* let the f/w know that the h/w is now under the control of the
6567 * driver. */
6568 igb_get_hw_control(adapter);
6569
9d5c8243
AK
6570 wr32(E1000_WUS, ~0);
6571
749ab2cd
YZ
6572 if (netdev->flags & IFF_UP) {
6573 err = __igb_open(netdev, true);
a88f10ec
AD
6574 if (err)
6575 return err;
6576 }
9d5c8243
AK
6577
6578 netif_device_attach(netdev);
749ab2cd
YZ
6579 return 0;
6580}
6581
6582#ifdef CONFIG_PM_RUNTIME
6583static int igb_runtime_idle(struct device *dev)
6584{
6585 struct pci_dev *pdev = to_pci_dev(dev);
6586 struct net_device *netdev = pci_get_drvdata(pdev);
6587 struct igb_adapter *adapter = netdev_priv(netdev);
6588
6589 if (!igb_has_link(adapter))
6590 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
6591
6592 return -EBUSY;
6593}
6594
6595static int igb_runtime_suspend(struct device *dev)
6596{
6597 struct pci_dev *pdev = to_pci_dev(dev);
6598 int retval;
6599 bool wake;
6600
6601 retval = __igb_shutdown(pdev, &wake, 1);
6602 if (retval)
6603 return retval;
6604
6605 if (wake) {
6606 pci_prepare_to_sleep(pdev);
6607 } else {
6608 pci_wake_from_d3(pdev, false);
6609 pci_set_power_state(pdev, PCI_D3hot);
6610 }
9d5c8243 6611
9d5c8243
AK
6612 return 0;
6613}
749ab2cd
YZ
6614
6615static int igb_runtime_resume(struct device *dev)
6616{
6617 return igb_resume(dev);
6618}
6619#endif /* CONFIG_PM_RUNTIME */
9d5c8243
AK
6620#endif
6621
6622static void igb_shutdown(struct pci_dev *pdev)
6623{
3fe7c4c9
RW
6624 bool wake;
6625
749ab2cd 6626 __igb_shutdown(pdev, &wake, 0);
3fe7c4c9
RW
6627
6628 if (system_state == SYSTEM_POWER_OFF) {
6629 pci_wake_from_d3(pdev, wake);
6630 pci_set_power_state(pdev, PCI_D3hot);
6631 }
9d5c8243
AK
6632}
6633
6634#ifdef CONFIG_NET_POLL_CONTROLLER
6635/*
6636 * Polling 'interrupt' - used by things like netconsole to send skbs
6637 * without having to re-enable interrupts. It's not called while
6638 * the interrupt routine is executing.
6639 */
6640static void igb_netpoll(struct net_device *netdev)
6641{
6642 struct igb_adapter *adapter = netdev_priv(netdev);
eebbbdba 6643 struct e1000_hw *hw = &adapter->hw;
0d1ae7f4 6644 struct igb_q_vector *q_vector;
9d5c8243 6645 int i;
9d5c8243 6646
047e0030 6647 for (i = 0; i < adapter->num_q_vectors; i++) {
0d1ae7f4
AD
6648 q_vector = adapter->q_vector[i];
6649 if (adapter->msix_entries)
6650 wr32(E1000_EIMC, q_vector->eims_value);
6651 else
6652 igb_irq_disable(adapter);
047e0030 6653 napi_schedule(&q_vector->napi);
eebbbdba 6654 }
9d5c8243
AK
6655}
6656#endif /* CONFIG_NET_POLL_CONTROLLER */
6657
6658/**
6659 * igb_io_error_detected - called when PCI error is detected
6660 * @pdev: Pointer to PCI device
6661 * @state: The current pci connection state
6662 *
6663 * This function is called after a PCI bus error affecting
6664 * this device has been detected.
6665 */
6666static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6667 pci_channel_state_t state)
6668{
6669 struct net_device *netdev = pci_get_drvdata(pdev);
6670 struct igb_adapter *adapter = netdev_priv(netdev);
6671
6672 netif_device_detach(netdev);
6673
59ed6eec
AD
6674 if (state == pci_channel_io_perm_failure)
6675 return PCI_ERS_RESULT_DISCONNECT;
6676
9d5c8243
AK
6677 if (netif_running(netdev))
6678 igb_down(adapter);
6679 pci_disable_device(pdev);
6680
 6681 /* Request a slot reset. */
6682 return PCI_ERS_RESULT_NEED_RESET;
6683}
6684
6685/**
6686 * igb_io_slot_reset - called after the pci bus has been reset.
6687 * @pdev: Pointer to PCI device
6688 *
6689 * Restart the card from scratch, as if from a cold-boot. Implementation
6690 * resembles the first-half of the igb_resume routine.
6691 */
6692static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6693{
6694 struct net_device *netdev = pci_get_drvdata(pdev);
6695 struct igb_adapter *adapter = netdev_priv(netdev);
6696 struct e1000_hw *hw = &adapter->hw;
40a914fa 6697 pci_ers_result_t result;
42bfd33a 6698 int err;
9d5c8243 6699
aed5dec3 6700 if (pci_enable_device_mem(pdev)) {
9d5c8243
AK
6701 dev_err(&pdev->dev,
6702 "Cannot re-enable PCI device after reset.\n");
40a914fa
AD
6703 result = PCI_ERS_RESULT_DISCONNECT;
6704 } else {
6705 pci_set_master(pdev);
6706 pci_restore_state(pdev);
b94f2d77 6707 pci_save_state(pdev);
9d5c8243 6708
40a914fa
AD
6709 pci_enable_wake(pdev, PCI_D3hot, 0);
6710 pci_enable_wake(pdev, PCI_D3cold, 0);
9d5c8243 6711
40a914fa
AD
6712 igb_reset(adapter);
6713 wr32(E1000_WUS, ~0);
6714 result = PCI_ERS_RESULT_RECOVERED;
6715 }
9d5c8243 6716
ea943d41
JK
6717 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6718 if (err) {
6719 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6720 "failed 0x%0x\n", err);
6721 /* non-fatal, continue */
6722 }
40a914fa
AD
6723
6724 return result;
9d5c8243
AK
6725}
6726
6727/**
6728 * igb_io_resume - called when traffic can start flowing again.
6729 * @pdev: Pointer to PCI device
6730 *
6731 * This callback is called when the error recovery driver tells us that
6732 * it's OK to resume normal operation. Implementation resembles the
6733 * second half of the igb_resume routine.
6734 */
6735static void igb_io_resume(struct pci_dev *pdev)
6736{
6737 struct net_device *netdev = pci_get_drvdata(pdev);
6738 struct igb_adapter *adapter = netdev_priv(netdev);
6739
9d5c8243
AK
6740 if (netif_running(netdev)) {
6741 if (igb_up(adapter)) {
6742 dev_err(&pdev->dev, "igb_up failed after reset\n");
6743 return;
6744 }
6745 }
6746
6747 netif_device_attach(netdev);
6748
6749 /* let the f/w know that the h/w is now under the control of the
6750 * driver. */
6751 igb_get_hw_control(adapter);
9d5c8243
AK
6752}
6753
26ad9178
AD
6754static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6755 u8 qsel)
6756{
6757 u32 rar_low, rar_high;
6758 struct e1000_hw *hw = &adapter->hw;
6759
6760 /* HW expects these in little endian so we reverse the byte order
6761 * from network order (big endian) to little endian
6762 */
6763 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6764 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6765 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
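/* Worked example (illustrative values): for addr = 00:11:22:33:44:55 the
 * packing above yields rar_low = 0x33221100 and the low 16 bits of
 * rar_high = 0x5544, before the valid/pool bits are OR'd in below.
 */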
6766
6767 /* Indicate to hardware the Address is Valid. */
6768 rar_high |= E1000_RAH_AV;
6769
6770 if (hw->mac.type == e1000_82575)
6771 rar_high |= E1000_RAH_POOL_1 * qsel;
6772 else
6773 rar_high |= E1000_RAH_POOL_1 << qsel;
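/* Reading of the pool encoding above (inferred from the arithmetic, not
 * stated in this file): 82575 treats the pool select as a numeric field
 * (E1000_RAH_POOL_1 * qsel), while later MACs use a per-pool bit mask
 * (E1000_RAH_POOL_1 << qsel sets the bit for pool 'qsel').
 */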
6774
6775 wr32(E1000_RAL(index), rar_low);
6776 wrfl();
6777 wr32(E1000_RAH(index), rar_high);
6778 wrfl();
6779}
6780
4ae196df
AD
6781static int igb_set_vf_mac(struct igb_adapter *adapter,
6782 int vf, unsigned char *mac_addr)
6783{
6784 struct e1000_hw *hw = &adapter->hw;
ff41f8dc
AD
6785 /* VF MAC addresses start at the end of the receive addresses and move
6786 * towards the first, so a collision should not be possible */
6787 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
4ae196df 6788
37680117 6789 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
4ae196df 6790
26ad9178 6791 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
4ae196df
AD
6792
6793 return 0;
6794}
6795
8151d294
WM
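/* Typical invocation (for reference; not stated in this file): this ndo is
 * reached via rtnetlink, e.g. "ip link set <pf-dev> vf <N> mac <address>".
 */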
6796static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6797{
6798 struct igb_adapter *adapter = netdev_priv(netdev);
6799 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6800 return -EINVAL;
6801 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6802 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6803 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6804 " change effective.");
6805 if (test_bit(__IGB_DOWN, &adapter->state)) {
6806 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6807 " but the PF device is not up.\n");
6808 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6809 " attempting to use the VF device.\n");
6810 }
6811 return igb_set_vf_mac(adapter, vf, mac);
6812}
6813
17dc566c
LL
6814static int igb_link_mbps(int internal_link_speed)
6815{
6816 switch (internal_link_speed) {
6817 case SPEED_100:
6818 return 100;
6819 case SPEED_1000:
6820 return 1000;
6821 default:
6822 return 0;
6823 }
6824}
6825
6826static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6827 int link_speed)
6828{
6829 int rf_dec, rf_int;
6830 u32 bcnrc_val;
6831
6832 if (tx_rate != 0) {
6833 /* Calculate the rate factor values to set */
6834 rf_int = link_speed / tx_rate;
6835 rf_dec = (link_speed - (rf_int * tx_rate));
6836 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
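/* Worked example (illustrative, assuming RF_INT_SHIFT is 14):
 * link_speed = 1000 and tx_rate = 300 give rf_int = 3 and
 * rf_dec = (100 << 14) / 300 = 5461, i.e. a rate factor of
 * about 3.333 = link_speed / tx_rate.
 */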
6837
6838 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6839 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6840 E1000_RTTBCNRC_RF_INT_MASK);
6841 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6842 } else {
6843 bcnrc_val = 0;
6844 }
6845
6846 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
f00b0da7
LL
6847 /*
6848 * Set global transmit compensation time to the MMW_SIZE in the RTTBCNRM
6849 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
6850 */
6851 wr32(E1000_RTTBCNRM, 0x14);
17dc566c
LL
6852 wr32(E1000_RTTBCNRC, bcnrc_val);
6853}
6854
6855static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6856{
6857 int actual_link_speed, i;
6858 bool reset_rate = false;
6859
6860 /* VF TX rate limit was not set or not supported */
6861 if ((adapter->vf_rate_link_speed == 0) ||
6862 (adapter->hw.mac.type != e1000_82576))
6863 return;
6864
6865 actual_link_speed = igb_link_mbps(adapter->link_speed);
6866 if (actual_link_speed != adapter->vf_rate_link_speed) {
6867 reset_rate = true;
6868 adapter->vf_rate_link_speed = 0;
6869 dev_info(&adapter->pdev->dev,
6870 "Link speed has been changed. VF Transmit "
6871 "rate is disabled\n");
6872 }
6873
6874 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6875 if (reset_rate)
6876 adapter->vf_data[i].tx_rate = 0;
6877
6878 igb_set_vf_rate_limit(&adapter->hw, i,
6879 adapter->vf_data[i].tx_rate,
6880 actual_link_speed);
6881 }
6882}
6883
8151d294
WM
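/* Typical invocation (for reference; not stated in this file): reached via
 * rtnetlink, e.g. "ip link set <pf-dev> vf <N> rate <Mbps>"; a tx_rate of 0
 * clears the limit, as handled in igb_set_vf_rate_limit() above.
 */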
6884static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6885{
17dc566c
LL
6886 struct igb_adapter *adapter = netdev_priv(netdev);
6887 struct e1000_hw *hw = &adapter->hw;
6888 int actual_link_speed;
6889
6890 if (hw->mac.type != e1000_82576)
6891 return -EOPNOTSUPP;
6892
6893 actual_link_speed = igb_link_mbps(adapter->link_speed);
6894 if ((vf >= adapter->vfs_allocated_count) ||
6895 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6896 (tx_rate < 0) || (tx_rate > actual_link_speed))
6897 return -EINVAL;
6898
6899 adapter->vf_rate_link_speed = actual_link_speed;
6900 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6901 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6902
6903 return 0;
8151d294
WM
6904}
6905
6906static int igb_ndo_get_vf_config(struct net_device *netdev,
6907 int vf, struct ifla_vf_info *ivi)
6908{
6909 struct igb_adapter *adapter = netdev_priv(netdev);
6910 if (vf >= adapter->vfs_allocated_count)
6911 return -EINVAL;
6912 ivi->vf = vf;
6913 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
17dc566c 6914 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
8151d294
WM
6915 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6916 ivi->qos = adapter->vf_data[vf].pf_qos;
6917 return 0;
6918}
6919
4ae196df
AD
6920static void igb_vmm_control(struct igb_adapter *adapter)
6921{
6922 struct e1000_hw *hw = &adapter->hw;
10d8e907 6923 u32 reg;
4ae196df 6924
52a1dd4d
AD
6925 switch (hw->mac.type) {
6926 case e1000_82575:
f96a8a0b
CW
6927 case e1000_i210:
6928 case e1000_i211:
52a1dd4d
AD
6929 default:
6930 /* replication is not supported on these devices */
4ae196df 6931 return;
52a1dd4d
AD
6932 case e1000_82576:
6933 /* notify HW that the MAC is adding vlan tags */
6934 reg = rd32(E1000_DTXCTL);
6935 reg |= E1000_DTXCTL_VLAN_ADDED;
6936 wr32(E1000_DTXCTL, reg);
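/* fall through - 82576 also picks up the VLAN-strip setting below */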
6937 case e1000_82580:
6938 /* enable replication vlan tag stripping */
6939 reg = rd32(E1000_RPLOLR);
6940 reg |= E1000_RPLOLR_STRVLAN;
6941 wr32(E1000_RPLOLR, reg);
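/* fall through */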
d2ba2ed8
AD
6942 case e1000_i350:
6943 /* none of the above registers are supported by i350 */
52a1dd4d
AD
6944 break;
6945 }
10d8e907 6946
d4960307
AD
6947 if (adapter->vfs_allocated_count) {
6948 igb_vmdq_set_loopback_pf(hw, true);
6949 igb_vmdq_set_replication_pf(hw, true);
13800469
GR
6950 igb_vmdq_set_anti_spoofing_pf(hw, true,
6951 adapter->vfs_allocated_count);
d4960307
AD
6952 } else {
6953 igb_vmdq_set_loopback_pf(hw, false);
6954 igb_vmdq_set_replication_pf(hw, false);
6955 }
4ae196df
AD
6956}
6957
b6e0c419
CW
6958static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
6959{
6960 struct e1000_hw *hw = &adapter->hw;
6961 u32 dmac_thr;
6962 u16 hwm;
6963
6964 if (hw->mac.type > e1000_82580) {
6965 if (adapter->flags & IGB_FLAG_DMAC) {
6966 u32 reg;
6967
6968 /* force threshold to 0. */
6969 wr32(E1000_DMCTXTH, 0);
6970
6971 /*
e8c626e9
MV
6972 * DMA Coalescing high water mark needs to be greater
6973 * than the Rx threshold. Set hwm to PBA - max frame
6974 * size in 16B units, but no lower than PBA - 6KB.
b6e0c419 6975 */
e8c626e9
MV
6976 hwm = 64 * pba - adapter->max_frame_size / 16;
6977 if (hwm < 64 * (pba - 6))
6978 hwm = 64 * (pba - 6);
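/* Worked example (illustrative values): with pba = 34 (KB) and
 * max_frame_size = 1522, hwm = 64 * 34 - 1522 / 16 = 2176 - 95 = 2081
 * sixteen-byte units, safely above the 64 * (34 - 6) = 1792 floor.
 */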
6979 reg = rd32(E1000_FCRTC);
6980 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
6981 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
6982 & E1000_FCRTC_RTH_COAL_MASK);
6983 wr32(E1000_FCRTC, reg);
6984
6985 /*
6986 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
6987 * frame size, but no lower than PBA - 10KB.
6988 */
6989 dmac_thr = pba - adapter->max_frame_size / 512;
6990 if (dmac_thr < pba - 10)
6991 dmac_thr = pba - 10;
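/* Worked example (same illustrative values): dmac_thr = 34 - 1522 / 512 =
 * 34 - 2 = 32 KB, which stays above the pba - 10 = 24 KB floor.
 */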
b6e0c419
CW
6992 reg = rd32(E1000_DMACR);
6993 reg &= ~E1000_DMACR_DMACTHR_MASK;
b6e0c419
CW
6994 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
6995 & E1000_DMACR_DMACTHR_MASK);
6996
6997 /* transition to L0s or L1 if available */
6998 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
6999
7000 /* watchdog timer = ~1000 usec, programmed in 32-usec units (1000 >> 5 == 31) */
7001 reg |= (1000 >> 5);
0c02dd98
MV
7002
7003 /* Disable BMC-to-OS Watchdog Enable */
7004 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
b6e0c419
CW
7005 wr32(E1000_DMACR, reg);
7006
7007 /*
7008 * no lower threshold to disable
7009 * coalescing (smart FIFO) - UTRESH=0
7010 */
7011 wr32(E1000_DMCRTRH, 0);
b6e0c419
CW
7012
7013 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
7014
7015 wr32(E1000_DMCTLX, reg);
7016
7017 /*
7018 * free space in tx packet buffer to wake from
7019 * DMA coal
7020 */
7021 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
7022 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
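/* The >> 6 presumably expresses the free Tx packet-buffer space in
 * 64-byte units (an inference from the shift; the unit is not stated
 * in this file).
 */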
7023
7024 /*
7025 * make low power state decision controlled
7026 * by DMA coal
7027 */
7028 reg = rd32(E1000_PCIEMISC);
7029 reg &= ~E1000_PCIEMISC_LX_DECISION;
7030 wr32(E1000_PCIEMISC, reg);
7031 } /* endif adapter->dmac is not disabled */
7032 } else if (hw->mac.type == e1000_82580) {
7033 u32 reg = rd32(E1000_PCIEMISC);
7034 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
7035 wr32(E1000_DMACR, 0);
7036 }
7037}
7038
9d5c8243 7039/* igb_main.c */