/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 3
#define MIN 0
#define BUILD 6
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
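/* with the values above, DRV_VERSION expands to the string "3.0.6-k" */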
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static void igb_init_hw_timer(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, u32 features);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		printk(KERN_INFO "%-15s %08x\n",
			reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	printk(KERN_INFO "%-15s ", rname);
	for (n = 0; n < 4; n++)
		printk(KERN_CONT "%08x ", regs[n]);
	printk(KERN_CONT "\n");
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		printk(KERN_INFO "Device Name     state            "
			"trans_start      last_rx\n");
		printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
		netdev->name,
		netdev->state,
		netdev->trans_start,
		netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	printk(KERN_INFO " Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
		" leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "T [desc]     [address 63:0  ] "
			"[PlPOCIStDDM Ln] [bi->dma       ] "
			"leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			printk(KERN_INFO "T [0x%03X]    %016llX %016llX %016llX"
				" %04X %p %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC/U\n");
			else if (i == tx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == tx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

			if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, phys_to_virt(buffer_info->dma),
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	printk(KERN_INFO "Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO " %5d %5X %5X\n", n,
			rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP     |SPH| HDR_LEN   | RSV|Packet|  RSS   |
	 *   | Checksum Ident  |   |           |    | Type | Type   |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		printk(KERN_INFO "------------------------------------\n");
		printk(KERN_INFO "R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		printk(KERN_INFO "RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				printk(KERN_INFO "RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb);
			} else {
				printk(KERN_INFO "R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb);

				if (netif_msg_pktdata(adapter)) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(buffer_info->dma),
						IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						phys_to_virt(
						  buffer_info->page_dma +
						  buffer_info->page_offset),
						PAGE_SIZE/2, true);
				}
			}

			if (i == rx_ring->next_to_use)
				printk(KERN_CONT " NTU\n");
			else if (i == rx_ring->next_to_clean)
				printk(KERN_CONT " NTC\n");
			else
				printk(KERN_CONT "\n");

		}
	}

exit:
	return;
}


/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
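/* e.g. ring index 0, 1, 2, 3 maps to register index 0, 8, 1, 9 - the
 * interleaved VF queue layout described in igb_cache_ring_register() below */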
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
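		/* fall through - any rings not mapped above take the linear
		 * mapping used by the other MAC types below */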
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;
	int orig_node = adapter->node;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
				    adapter->node);
		if (!ring)
			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->numa_node = adapter->node;
		/* enable rx checksum */
		set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
		adapter->rx_ring[i] = ring;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	igb_cache_ring_register(adapter);

	return 0;

err:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

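/* IGB_N0_QUEUE marks a q_vector that has no Rx or Tx ring of the given type */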
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	return netif_set_real_num_rx_queues(adapter->netdev,
					    adapter->num_rx_queues);
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;
	int orig_node = adapter->node;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
						adapter->num_tx_queues)) &&
		    (adapter->num_rx_queues == v_idx))
			adapter->node = orig_node;
		if (orig_node == -1) {
			int cur_node = next_online_node(adapter->node);
			if (cur_node == MAX_NUMNODES)
				cur_node = first_online_node;
			adapter->node = cur_node;
		}
		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
					adapter->node);
		if (!q_vector)
			q_vector = kzalloc(sizeof(struct igb_q_vector),
					   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	/* Restore the adapter's original node */
	adapter->node = orig_node;

	return 0;

err_out:
	/* Restore the adapter's original node */
	adapter->node = orig_node;
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}


	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		igb_assign_vector(adapter->q_vector[0], 0);
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
				 q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

1624void igb_reset(struct igb_adapter *adapter)
1625{
090b1795 1626 struct pci_dev *pdev = adapter->pdev;
9d5c8243 1627 struct e1000_hw *hw = &adapter->hw;
2d064c06
AD
1628 struct e1000_mac_info *mac = &hw->mac;
1629 struct e1000_fc_info *fc = &hw->fc;
9d5c8243
AK
1630 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1631 u16 hwm;
1632
1633 /* Repartition Pba for greater than 9k mtu
1634 * To take effect CTRL.RST is required.
1635 */
fa4dfae0 1636 switch (mac->type) {
d2ba2ed8 1637 case e1000_i350:
55cac248
AD
1638 case e1000_82580:
1639 pba = rd32(E1000_RXPBS);
1640 pba = igb_rxpbs_adjust_82580(pba);
1641 break;
fa4dfae0 1642 case e1000_82576:
d249be54
AD
1643 pba = rd32(E1000_RXPBS);
1644 pba &= E1000_RXPBS_SIZE_MASK_82576;
fa4dfae0
AD
1645 break;
1646 case e1000_82575:
1647 default:
1648 pba = E1000_PBA_34K;
1649 break;
2d064c06 1650 }
9d5c8243 1651
2d064c06
AD
1652 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1653 (mac->type < e1000_82576)) {
9d5c8243
AK
1654 /* adjust PBA for jumbo frames */
1655 wr32(E1000_PBA, pba);
1656
1657 /* To maintain wire speed transmits, the Tx FIFO should be
1658 * large enough to accommodate two full transmit packets,
1659 * rounded up to the next 1KB and expressed in KB. Likewise,
1660 * the Rx FIFO should be large enough to accommodate at least
1661 * one full receive packet and is similarly rounded up and
1662 * expressed in KB. */
1663 pba = rd32(E1000_PBA);
1664 /* upper 16 bits has Tx packet buffer allocation size in KB */
1665 tx_space = pba >> 16;
1666 /* lower 16 bits has Rx packet buffer allocation size in KB */
1667 pba &= 0xffff;
 1668 /* the Tx FIFO also stores 16 bytes of information about the Tx
 1669 * packet, but don't include the Ethernet FCS because hardware appends it */
1670 min_tx_space = (adapter->max_frame_size +
85e8d004 1671 sizeof(union e1000_adv_tx_desc) -
9d5c8243
AK
1672 ETH_FCS_LEN) * 2;
1673 min_tx_space = ALIGN(min_tx_space, 1024);
1674 min_tx_space >>= 10;
1675 /* software strips receive CRC, so leave room for it */
1676 min_rx_space = adapter->max_frame_size;
1677 min_rx_space = ALIGN(min_rx_space, 1024);
1678 min_rx_space >>= 10;
1679
1680 /* If current Tx allocation is less than the min Tx FIFO size,
1681 * and the min Tx FIFO size is less than the current Rx FIFO
1682 * allocation, take space away from current Rx allocation */
1683 if (tx_space < min_tx_space &&
1684 ((min_tx_space - tx_space) < pba)) {
1685 pba = pba - (min_tx_space - tx_space);
1686
1687 /* if short on rx space, rx wins and must trump tx
1688 * adjustment */
1689 if (pba < min_rx_space)
1690 pba = min_rx_space;
1691 }
2d064c06 1692 wr32(E1000_PBA, pba);
9d5c8243 1693 }
9d5c8243
AK
1694
1695 /* flow control settings */
1696 /* The high water mark must be low enough to fit one full frame
1697 * (or the size used for early receive) above it in the Rx FIFO.
1698 * Set it to the lower of:
1699 * - 90% of the Rx FIFO size, or
1700 * - the full Rx FIFO size minus one full frame */
1701 hwm = min(((pba << 10) * 9 / 10),
2d064c06 1702 ((pba << 10) - 2 * adapter->max_frame_size));
9d5c8243 1703
d405ea3e
AD
1704 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1705 fc->low_water = fc->high_water - 16;
9d5c8243
AK
1706 fc->pause_time = 0xFFFF;
1707 fc->send_xon = 1;
0cce119a 1708 fc->current_mode = fc->requested_mode;
9d5c8243 1709
4ae196df
AD
1710 /* disable receive for all VFs and wait one second */
1711 if (adapter->vfs_allocated_count) {
1712 int i;
1713 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
8fa7e0f7 1714 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
4ae196df
AD
1715
1716 /* ping all the active vfs to let them know we are going down */
f2ca0dbe 1717 igb_ping_all_vfs(adapter);
4ae196df
AD
1718
1719 /* disable transmits and receives */
1720 wr32(E1000_VFRE, 0);
1721 wr32(E1000_VFTE, 0);
1722 }
1723
9d5c8243 1724 /* Allow time for pending master requests to run */
330a6d6a 1725 hw->mac.ops.reset_hw(hw);
9d5c8243
AK
1726 wr32(E1000_WUC, 0);
1727
330a6d6a 1728 if (hw->mac.ops.init_hw(hw))
090b1795 1729 dev_err(&pdev->dev, "Hardware Error\n");
831ec0b4
CW
1730 if (hw->mac.type > e1000_82580) {
1731 if (adapter->flags & IGB_FLAG_DMAC) {
1732 u32 reg;
1733
1734 /*
1735 * DMA Coalescing high water mark needs to be higher
 1736 * than the Rx threshold. The Rx threshold is
 1737 * currently pba - 6, so we should use a high water
 1738 * mark of pba - 4. */
1739 hwm = (pba - 4) << 10;
1740
1741 reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
1742 & E1000_DMACR_DMACTHR_MASK);
1743
 1744 /* transition to L0s or L1 if available */
1745 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
1746
 1747 /* watchdog timer = 1000 usec, in 32 usec intervals */
1748 reg |= (1000 >> 5);
1749 wr32(E1000_DMACR, reg);
1750
 1751 /* no lower threshold to disable coalescing (smart FIFO)
 1752 * - UTRESH = 0 */
1753 wr32(E1000_DMCRTRH, 0);
1754
1755 /* set hwm to PBA - 2 * max frame size */
1756 wr32(E1000_FCRTC, hwm);
1757
1758 /*
 1759 * This sets the time to wait before requesting a transition to
 1760 * the low power state to the number of usecs needed to receive
 1761 * one 512-byte frame at gigabit line rate
1762 */
1763 reg = rd32(E1000_DMCTLX);
1764 reg |= IGB_DMCTLX_DCFLUSH_DIS;
1765
1766 /* Delay 255 usec before entering Lx state. */
1767 reg |= 0xFF;
1768 wr32(E1000_DMCTLX, reg);
1769
1770 /* free space in Tx packet buffer to wake from DMAC */
1771 wr32(E1000_DMCTXTH,
1772 (IGB_MIN_TXPBSIZE -
1773 (IGB_TX_BUF_4096 + adapter->max_frame_size))
1774 >> 6);
1775
1776 /* make low power state decision controlled by DMAC */
1777 reg = rd32(E1000_PCIEMISC);
1778 reg |= E1000_PCIEMISC_LX_DECISION;
1779 wr32(E1000_PCIEMISC, reg);
1780 } /* end if IGB_FLAG_DMAC set */
1781 }
55cac248
AD
1782 if (hw->mac.type == e1000_82580) {
1783 u32 reg = rd32(E1000_PCIEMISC);
1784 wr32(E1000_PCIEMISC,
1785 reg & ~E1000_PCIEMISC_LX_DECISION);
1786 }
88a268c1
NN
1787 if (!netif_running(adapter->netdev))
1788 igb_power_down_link(adapter);
1789
9d5c8243
AK
1790 igb_update_mng_vlan(adapter);
1791
1792 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1793 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1794
330a6d6a 1795 igb_get_phy_info(hw);
9d5c8243
AK
1796}
1797
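
As an aside, here is a minimal user-space sketch of the packet-buffer rebalancing and high-water-mark arithmetic that igb_reset() performs above. The frame size, PBA and Tx-space values are assumed example inputs, and the 16-byte descriptor length stands in for sizeof(union e1000_adv_tx_desc); this is only an illustration, not driver code.

#include <stdio.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define ADV_TX_DESC_LEN  16   /* assumed sizeof(union e1000_adv_tx_desc) */
#define ETH_FCS_LEN      4

int main(void)
{
	unsigned int max_frame = 9018;  /* assumed jumbo-frame size, bytes */
	unsigned int pba = 64;          /* example Rx packet buffer, KB */
	unsigned int tx_space = 20;     /* example Tx packet buffer, KB */

	/* two full frames plus descriptor info, rounded up to 1 KB units */
	unsigned int min_tx = ALIGN_UP((max_frame + ADV_TX_DESC_LEN - ETH_FCS_LEN) * 2, 1024) >> 10;
	unsigned int min_rx = ALIGN_UP(max_frame, 1024) >> 10;
	unsigned int hwm;

	if (tx_space < min_tx && (min_tx - tx_space) < pba) {
		/* steal the Tx shortfall from the Rx allocation */
		pba -= (min_tx - tx_space);
		if (pba < min_rx)
			pba = min_rx;
	}

	/* high water mark: lower of 90% of the Rx FIFO or the FIFO minus two frames */
	hwm = (pba << 10) * 9 / 10;
	if (hwm > (pba << 10) - 2 * max_frame)
		hwm = (pba << 10) - 2 * max_frame;

	printf("min_tx=%u KB min_rx=%u KB pba=%u KB hwm=%u bytes\n",
	       min_tx, min_rx, pba, hwm & ~0xFu);   /* 16-byte granularity, as above */
	return 0;
}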
b2cb09b1
JP
1798static u32 igb_fix_features(struct net_device *netdev, u32 features)
1799{
1800 /*
1801 * Since there is no support for separate rx/tx vlan accel
1802 * enable/disable make sure tx flag is always in same state as rx.
1803 */
1804 if (features & NETIF_F_HW_VLAN_RX)
1805 features |= NETIF_F_HW_VLAN_TX;
1806 else
1807 features &= ~NETIF_F_HW_VLAN_TX;
1808
1809 return features;
1810}
1811
ac52caa3
MM
1812static int igb_set_features(struct net_device *netdev, u32 features)
1813{
1814 struct igb_adapter *adapter = netdev_priv(netdev);
1815 int i;
b2cb09b1 1816 u32 changed = netdev->features ^ features;
ac52caa3
MM
1817
1818 for (i = 0; i < adapter->num_rx_queues; i++) {
1819 if (features & NETIF_F_RXCSUM)
866cff06
AD
1820 set_bit(IGB_RING_FLAG_RX_CSUM,
1821 &adapter->rx_ring[i]->flags);
ac52caa3 1822 else
866cff06
AD
1823 clear_bit(IGB_RING_FLAG_RX_CSUM,
1824 &adapter->rx_ring[i]->flags);
ac52caa3
MM
1825 }
1826
b2cb09b1
JP
1827 if (changed & NETIF_F_HW_VLAN_RX)
1828 igb_vlan_mode(netdev, features);
1829
ac52caa3
MM
1830 return 0;
1831}
1832
2e5c6922 1833static const struct net_device_ops igb_netdev_ops = {
559e9c49 1834 .ndo_open = igb_open,
2e5c6922 1835 .ndo_stop = igb_close,
cd392f5c 1836 .ndo_start_xmit = igb_xmit_frame,
12dcd86b 1837 .ndo_get_stats64 = igb_get_stats64,
ff41f8dc 1838 .ndo_set_rx_mode = igb_set_rx_mode,
2e5c6922
SH
1839 .ndo_set_mac_address = igb_set_mac,
1840 .ndo_change_mtu = igb_change_mtu,
1841 .ndo_do_ioctl = igb_ioctl,
1842 .ndo_tx_timeout = igb_tx_timeout,
1843 .ndo_validate_addr = eth_validate_addr,
2e5c6922
SH
1844 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1845 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
8151d294
WM
1846 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1847 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1848 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1849 .ndo_get_vf_config = igb_ndo_get_vf_config,
2e5c6922
SH
1850#ifdef CONFIG_NET_POLL_CONTROLLER
1851 .ndo_poll_controller = igb_netpoll,
1852#endif
b2cb09b1
JP
1853 .ndo_fix_features = igb_fix_features,
1854 .ndo_set_features = igb_set_features,
2e5c6922
SH
1855};
1856
9d5c8243
AK
1857/**
1858 * igb_probe - Device Initialization Routine
1859 * @pdev: PCI device information struct
1860 * @ent: entry in igb_pci_tbl
1861 *
1862 * Returns 0 on success, negative on failure
1863 *
1864 * igb_probe initializes an adapter identified by a pci_dev structure.
1865 * The OS initialization, configuring of the adapter private structure,
1866 * and a hardware reset occur.
1867 **/
1868static int __devinit igb_probe(struct pci_dev *pdev,
1869 const struct pci_device_id *ent)
1870{
1871 struct net_device *netdev;
1872 struct igb_adapter *adapter;
1873 struct e1000_hw *hw;
4337e993 1874 u16 eeprom_data = 0;
9835fd73 1875 s32 ret_val;
4337e993 1876 static int global_quad_port_a; /* global quad port a indication */
9d5c8243
AK
1877 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1878 unsigned long mmio_start, mmio_len;
2d6a5e95 1879 int err, pci_using_dac;
9d5c8243 1880 u16 eeprom_apme_mask = IGB_EEPROM_APME;
9835fd73 1881 u8 part_str[E1000_PBANUM_LENGTH];
9d5c8243 1882
bded64a7
AG
1883 /* Catch broken hardware that put the wrong VF device ID in
1884 * the PCIe SR-IOV capability.
1885 */
1886 if (pdev->is_virtfn) {
1887 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
1888 pci_name(pdev), pdev->vendor, pdev->device);
1889 return -EINVAL;
1890 }
1891
aed5dec3 1892 err = pci_enable_device_mem(pdev);
9d5c8243
AK
1893 if (err)
1894 return err;
1895
1896 pci_using_dac = 0;
59d71989 1897 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
9d5c8243 1898 if (!err) {
59d71989 1899 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
9d5c8243
AK
1900 if (!err)
1901 pci_using_dac = 1;
1902 } else {
59d71989 1903 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
9d5c8243 1904 if (err) {
59d71989 1905 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
9d5c8243
AK
1906 if (err) {
1907 dev_err(&pdev->dev, "No usable DMA "
1908 "configuration, aborting\n");
1909 goto err_dma;
1910 }
1911 }
1912 }
1913
aed5dec3
AD
1914 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1915 IORESOURCE_MEM),
1916 igb_driver_name);
9d5c8243
AK
1917 if (err)
1918 goto err_pci_reg;
1919
19d5afd4 1920 pci_enable_pcie_error_reporting(pdev);
40a914fa 1921
9d5c8243 1922 pci_set_master(pdev);
c682fc23 1923 pci_save_state(pdev);
9d5c8243
AK
1924
1925 err = -ENOMEM;
1bfaf07b 1926 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1cc3bd87 1927 IGB_MAX_TX_QUEUES);
9d5c8243
AK
1928 if (!netdev)
1929 goto err_alloc_etherdev;
1930
1931 SET_NETDEV_DEV(netdev, &pdev->dev);
1932
1933 pci_set_drvdata(pdev, netdev);
1934 adapter = netdev_priv(netdev);
1935 adapter->netdev = netdev;
1936 adapter->pdev = pdev;
1937 hw = &adapter->hw;
1938 hw->back = adapter;
1939 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1940
1941 mmio_start = pci_resource_start(pdev, 0);
1942 mmio_len = pci_resource_len(pdev, 0);
1943
1944 err = -EIO;
28b0759c
AD
1945 hw->hw_addr = ioremap(mmio_start, mmio_len);
1946 if (!hw->hw_addr)
9d5c8243
AK
1947 goto err_ioremap;
1948
2e5c6922 1949 netdev->netdev_ops = &igb_netdev_ops;
9d5c8243 1950 igb_set_ethtool_ops(netdev);
9d5c8243 1951 netdev->watchdog_timeo = 5 * HZ;
9d5c8243
AK
1952
1953 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1954
1955 netdev->mem_start = mmio_start;
1956 netdev->mem_end = mmio_start + mmio_len;
1957
9d5c8243
AK
1958 /* PCI config space info */
1959 hw->vendor_id = pdev->vendor;
1960 hw->device_id = pdev->device;
1961 hw->revision_id = pdev->revision;
1962 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1963 hw->subsystem_device_id = pdev->subsystem_device;
1964
9d5c8243
AK
1965 /* Copy the default MAC, PHY and NVM function pointers */
1966 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1967 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1968 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1969 /* Initialize skew-specific constants */
1970 err = ei->get_invariants(hw);
1971 if (err)
450c87c8 1972 goto err_sw_init;
9d5c8243 1973
450c87c8 1974 /* setup the private structure */
9d5c8243
AK
1975 err = igb_sw_init(adapter);
1976 if (err)
1977 goto err_sw_init;
1978
1979 igb_get_bus_info_pcie(hw);
1980
1981 hw->phy.autoneg_wait_to_complete = false;
9d5c8243
AK
1982
1983 /* Copper options */
1984 if (hw->phy.media_type == e1000_media_type_copper) {
1985 hw->phy.mdix = AUTO_ALL_MODES;
1986 hw->phy.disable_polarity_correction = false;
1987 hw->phy.ms_type = e1000_ms_hw_default;
1988 }
1989
1990 if (igb_check_reset_block(hw))
1991 dev_info(&pdev->dev,
1992 "PHY reset is blocked due to SOL/IDER session.\n");
1993
ac52caa3 1994 netdev->hw_features = NETIF_F_SG |
7d8eb29e 1995 NETIF_F_IP_CSUM |
ac52caa3
MM
1996 NETIF_F_IPV6_CSUM |
1997 NETIF_F_TSO |
1998 NETIF_F_TSO6 |
b2cb09b1
JP
1999 NETIF_F_RXCSUM |
2000 NETIF_F_HW_VLAN_RX;
ac52caa3
MM
2001
2002 netdev->features = netdev->hw_features |
9d5c8243 2003 NETIF_F_HW_VLAN_TX |
9d5c8243
AK
2004 NETIF_F_HW_VLAN_FILTER;
2005
48f29ffc
JK
2006 netdev->vlan_features |= NETIF_F_TSO;
2007 netdev->vlan_features |= NETIF_F_TSO6;
7d8eb29e 2008 netdev->vlan_features |= NETIF_F_IP_CSUM;
cd1da503 2009 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
48f29ffc
JK
2010 netdev->vlan_features |= NETIF_F_SG;
2011
7b872a55 2012 if (pci_using_dac) {
9d5c8243 2013 netdev->features |= NETIF_F_HIGHDMA;
7b872a55
YZ
2014 netdev->vlan_features |= NETIF_F_HIGHDMA;
2015 }
9d5c8243 2016
ac52caa3
MM
2017 if (hw->mac.type >= e1000_82576) {
2018 netdev->hw_features |= NETIF_F_SCTP_CSUM;
b9473560 2019 netdev->features |= NETIF_F_SCTP_CSUM;
ac52caa3 2020 }
b9473560 2021
01789349
JP
2022 netdev->priv_flags |= IFF_UNICAST_FLT;
2023
330a6d6a 2024 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
9d5c8243
AK
2025
2026 /* before reading the NVM, reset the controller to put the device in a
2027 * known good starting state */
2028 hw->mac.ops.reset_hw(hw);
2029
2030 /* make sure the NVM is good */
4322e561 2031 if (hw->nvm.ops.validate(hw) < 0) {
9d5c8243
AK
2032 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2033 err = -EIO;
2034 goto err_eeprom;
2035 }
2036
2037 /* copy the MAC address out of the NVM */
2038 if (hw->mac.ops.read_mac_addr(hw))
2039 dev_err(&pdev->dev, "NVM Read Error\n");
2040
2041 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2042 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
2043
2044 if (!is_valid_ether_addr(netdev->perm_addr)) {
2045 dev_err(&pdev->dev, "Invalid MAC Address\n");
2046 err = -EIO;
2047 goto err_eeprom;
2048 }
2049
c061b18d 2050 setup_timer(&adapter->watchdog_timer, igb_watchdog,
0e340485 2051 (unsigned long) adapter);
c061b18d 2052 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
0e340485 2053 (unsigned long) adapter);
9d5c8243
AK
2054
2055 INIT_WORK(&adapter->reset_task, igb_reset_task);
2056 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2057
450c87c8 2058 /* Initialize link properties that are user-changeable */
9d5c8243
AK
2059 adapter->fc_autoneg = true;
2060 hw->mac.autoneg = true;
2061 hw->phy.autoneg_advertised = 0x2f;
2062
0cce119a
AD
2063 hw->fc.requested_mode = e1000_fc_default;
2064 hw->fc.current_mode = e1000_fc_default;
9d5c8243 2065
9d5c8243
AK
2066 igb_validate_mdi_setting(hw);
2067
9d5c8243
AK
 2068 /* Initial Wake on LAN setting. If APM wake is enabled in the EEPROM,
2069 * enable the ACPI Magic Packet filter
2070 */
2071
a2cf8b6c 2072 if (hw->bus.func == 0)
312c75ae 2073 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
6d337dce 2074 else if (hw->mac.type >= e1000_82580)
55cac248
AD
2075 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2076 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2077 &eeprom_data);
a2cf8b6c
AD
2078 else if (hw->bus.func == 1)
2079 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
9d5c8243
AK
2080
2081 if (eeprom_data & eeprom_apme_mask)
2082 adapter->eeprom_wol |= E1000_WUFC_MAG;
2083
2084 /* now that we have the eeprom settings, apply the special cases where
2085 * the eeprom may be wrong or the board simply won't support wake on
2086 * lan on a particular port */
2087 switch (pdev->device) {
2088 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2089 adapter->eeprom_wol = 0;
2090 break;
2091 case E1000_DEV_ID_82575EB_FIBER_SERDES:
2d064c06
AD
2092 case E1000_DEV_ID_82576_FIBER:
2093 case E1000_DEV_ID_82576_SERDES:
9d5c8243
AK
2094 /* Wake events only supported on port A for dual fiber
2095 * regardless of eeprom setting */
2096 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2097 adapter->eeprom_wol = 0;
2098 break;
c8ea5ea9 2099 case E1000_DEV_ID_82576_QUAD_COPPER:
d5aa2252 2100 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
c8ea5ea9
AD
2101 /* if quad port adapter, disable WoL on all but port A */
2102 if (global_quad_port_a != 0)
2103 adapter->eeprom_wol = 0;
2104 else
2105 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2106 /* Reset for multiple quad port adapters */
2107 if (++global_quad_port_a == 4)
2108 global_quad_port_a = 0;
2109 break;
9d5c8243
AK
2110 }
2111
2112 /* initialize the wol settings based on the eeprom settings */
2113 adapter->wol = adapter->eeprom_wol;
e1b86d84 2114 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
9d5c8243
AK
2115
2116 /* reset the hardware with the new settings */
2117 igb_reset(adapter);
2118
2119 /* let the f/w know that the h/w is now under the control of the
2120 * driver. */
2121 igb_get_hw_control(adapter);
2122
9d5c8243
AK
2123 strcpy(netdev->name, "eth%d");
2124 err = register_netdev(netdev);
2125 if (err)
2126 goto err_register;
2127
b2cb09b1
JP
2128 igb_vlan_mode(netdev, netdev->features);
2129
b168dfc5
JB
2130 /* carrier off reporting is important to ethtool even BEFORE open */
2131 netif_carrier_off(netdev);
2132
421e02f0 2133#ifdef CONFIG_IGB_DCA
bbd98fe4 2134 if (dca_add_requester(&pdev->dev) == 0) {
7dfc16fa 2135 adapter->flags |= IGB_FLAG_DCA_ENABLED;
fe4506b6 2136 dev_info(&pdev->dev, "DCA enabled\n");
fe4506b6
JC
2137 igb_setup_dca(adapter);
2138 }
fe4506b6 2139
38c845c7 2140#endif
673b8b70
AB
2141 /* do hw tstamp init after resetting */
2142 igb_init_hw_timer(adapter);
2143
9d5c8243
AK
2144 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2145 /* print bus type/speed/width info */
7c510e4b 2146 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
9d5c8243 2147 netdev->name,
559e9c49 2148 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
ff846f52 2149 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
559e9c49 2150 "unknown"),
59c3de89
AD
2151 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2152 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2153 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2154 "unknown"),
7c510e4b 2155 netdev->dev_addr);
9d5c8243 2156
9835fd73
CW
2157 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2158 if (ret_val)
2159 strcpy(part_str, "Unknown");
2160 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
9d5c8243
AK
2161 dev_info(&pdev->dev,
2162 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2163 adapter->msix_entries ? "MSI-X" :
7dfc16fa 2164 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
9d5c8243 2165 adapter->num_rx_queues, adapter->num_tx_queues);
09b068d4
CW
2166 switch (hw->mac.type) {
2167 case e1000_i350:
2168 igb_set_eee_i350(hw);
2169 break;
2170 default:
2171 break;
2172 }
9d5c8243
AK
2173 return 0;
2174
2175err_register:
2176 igb_release_hw_control(adapter);
2177err_eeprom:
2178 if (!igb_check_reset_block(hw))
f5f4cf08 2179 igb_reset_phy(hw);
9d5c8243
AK
2180
2181 if (hw->flash_address)
2182 iounmap(hw->flash_address);
9d5c8243 2183err_sw_init:
047e0030 2184 igb_clear_interrupt_scheme(adapter);
9d5c8243
AK
2185 iounmap(hw->hw_addr);
2186err_ioremap:
2187 free_netdev(netdev);
2188err_alloc_etherdev:
559e9c49
AD
2189 pci_release_selected_regions(pdev,
2190 pci_select_bars(pdev, IORESOURCE_MEM));
9d5c8243
AK
2191err_pci_reg:
2192err_dma:
2193 pci_disable_device(pdev);
2194 return err;
2195}
2196
2197/**
2198 * igb_remove - Device Removal Routine
2199 * @pdev: PCI device information struct
2200 *
2201 * igb_remove is called by the PCI subsystem to alert the driver
 2202 * that it should release a PCI device. This could be caused by a
2203 * Hot-Plug event, or because the driver is going to be removed from
2204 * memory.
2205 **/
2206static void __devexit igb_remove(struct pci_dev *pdev)
2207{
2208 struct net_device *netdev = pci_get_drvdata(pdev);
2209 struct igb_adapter *adapter = netdev_priv(netdev);
fe4506b6 2210 struct e1000_hw *hw = &adapter->hw;
9d5c8243 2211
760141a5
TH
2212 /*
2213 * The watchdog timer may be rescheduled, so explicitly
2214 * disable watchdog from being rescheduled.
2215 */
9d5c8243
AK
2216 set_bit(__IGB_DOWN, &adapter->state);
2217 del_timer_sync(&adapter->watchdog_timer);
2218 del_timer_sync(&adapter->phy_info_timer);
2219
760141a5
TH
2220 cancel_work_sync(&adapter->reset_task);
2221 cancel_work_sync(&adapter->watchdog_task);
9d5c8243 2222
421e02f0 2223#ifdef CONFIG_IGB_DCA
7dfc16fa 2224 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
fe4506b6
JC
2225 dev_info(&pdev->dev, "DCA disabled\n");
2226 dca_remove_requester(&pdev->dev);
7dfc16fa 2227 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
cbd347ad 2228 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
fe4506b6
JC
2229 }
2230#endif
2231
9d5c8243
AK
2232 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2233 * would have already happened in close and is redundant. */
2234 igb_release_hw_control(adapter);
2235
2236 unregister_netdev(netdev);
2237
047e0030 2238 igb_clear_interrupt_scheme(adapter);
9d5c8243 2239
37680117
AD
2240#ifdef CONFIG_PCI_IOV
2241 /* reclaim resources allocated to VFs */
2242 if (adapter->vf_data) {
2243 /* disable iov and allow time for transactions to clear */
2244 pci_disable_sriov(pdev);
2245 msleep(500);
2246
2247 kfree(adapter->vf_data);
2248 adapter->vf_data = NULL;
2249 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
945a5151 2250 wrfl();
37680117
AD
2251 msleep(100);
2252 dev_info(&pdev->dev, "IOV Disabled\n");
2253 }
2254#endif
559e9c49 2255
28b0759c
AD
2256 iounmap(hw->hw_addr);
2257 if (hw->flash_address)
2258 iounmap(hw->flash_address);
559e9c49
AD
2259 pci_release_selected_regions(pdev,
2260 pci_select_bars(pdev, IORESOURCE_MEM));
9d5c8243
AK
2261
2262 free_netdev(netdev);
2263
19d5afd4 2264 pci_disable_pcie_error_reporting(pdev);
40a914fa 2265
9d5c8243
AK
2266 pci_disable_device(pdev);
2267}
2268
a6b623e0
AD
2269/**
2270 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2271 * @adapter: board private structure to initialize
2272 *
2273 * This function initializes the vf specific data storage and then attempts to
 2274 * allocate the VFs. The reason for ordering it this way is that it is much
 2275 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2276 * the memory for the VFs.
2277 **/
2278static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2279{
2280#ifdef CONFIG_PCI_IOV
2281 struct pci_dev *pdev = adapter->pdev;
2282
a6b623e0
AD
2283 if (adapter->vfs_allocated_count) {
2284 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2285 sizeof(struct vf_data_storage),
2286 GFP_KERNEL);
2287 /* if allocation failed then we do not support SR-IOV */
2288 if (!adapter->vf_data) {
2289 adapter->vfs_allocated_count = 0;
2290 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2291 "Data Storage\n");
2292 }
2293 }
2294
2295 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
2296 kfree(adapter->vf_data);
2297 adapter->vf_data = NULL;
2298#endif /* CONFIG_PCI_IOV */
2299 adapter->vfs_allocated_count = 0;
2300#ifdef CONFIG_PCI_IOV
2301 } else {
2302 unsigned char mac_addr[ETH_ALEN];
2303 int i;
2304 dev_info(&pdev->dev, "%d vfs allocated\n",
2305 adapter->vfs_allocated_count);
2306 for (i = 0; i < adapter->vfs_allocated_count; i++) {
2307 random_ether_addr(mac_addr);
2308 igb_set_vf_mac(adapter, i, mac_addr);
2309 }
831ec0b4
CW
2310 /* DMA Coalescing is not supported in IOV mode. */
2311 if (adapter->flags & IGB_FLAG_DMAC)
2312 adapter->flags &= ~IGB_FLAG_DMAC;
a6b623e0
AD
2313 }
2314#endif /* CONFIG_PCI_IOV */
2315}
2316
115f459a
AD
2317
2318/**
2319 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2320 * @adapter: board private structure to initialize
2321 *
2322 * igb_init_hw_timer initializes the function pointer and values for the hw
2323 * timer found in hardware.
2324 **/
2325static void igb_init_hw_timer(struct igb_adapter *adapter)
2326{
2327 struct e1000_hw *hw = &adapter->hw;
2328
2329 switch (hw->mac.type) {
d2ba2ed8 2330 case e1000_i350:
55cac248
AD
2331 case e1000_82580:
2332 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2333 adapter->cycles.read = igb_read_clock;
2334 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2335 adapter->cycles.mult = 1;
2336 /*
2337 * The 82580 timesync updates the system timer every 8ns by 8ns
2338 * and the value cannot be shifted. Instead we need to shift
2339 * the registers to generate a 64bit timer value. As a result
2340 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2341 * 24 in order to generate a larger value for synchronization.
2342 */
2343 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2344 /* disable system timer temporarily by setting bit 31 */
2345 wr32(E1000_TSAUXC, 0x80000000);
2346 wrfl();
2347
2348 /* Set registers so that rollover occurs soon to test this. */
2349 wr32(E1000_SYSTIMR, 0x00000000);
2350 wr32(E1000_SYSTIML, 0x80000000);
2351 wr32(E1000_SYSTIMH, 0x000000FF);
2352 wrfl();
2353
2354 /* enable system timer by clearing bit 31 */
2355 wr32(E1000_TSAUXC, 0x0);
2356 wrfl();
2357
2358 timecounter_init(&adapter->clock,
2359 &adapter->cycles,
2360 ktime_to_ns(ktime_get_real()));
2361 /*
2362 * Synchronize our NIC clock against system wall clock. NIC
2363 * time stamp reading requires ~3us per sample, each sample
2364 * was pretty stable even under load => only require 10
2365 * samples for each offset comparison.
2366 */
2367 memset(&adapter->compare, 0, sizeof(adapter->compare));
2368 adapter->compare.source = &adapter->clock;
2369 adapter->compare.target = ktime_get_real;
2370 adapter->compare.num_samples = 10;
2371 timecompare_update(&adapter->compare, 0);
2372 break;
115f459a
AD
2373 case e1000_82576:
2374 /*
2375 * Initialize hardware timer: we keep it running just in case
2376 * that some program needs it later on.
2377 */
2378 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2379 adapter->cycles.read = igb_read_clock;
2380 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2381 adapter->cycles.mult = 1;
2382 /**
2383 * Scale the NIC clock cycle by a large factor so that
2384 * relatively small clock corrections can be added or
25985edc 2385 * subtracted at each clock tick. The drawbacks of a large
115f459a
AD
2386 * factor are a) that the clock register overflows more quickly
2387 * (not such a big deal) and b) that the increment per tick has
2388 * to fit into 24 bits. As a result we need to use a shift of
2389 * 19 so we can fit a value of 16 into the TIMINCA register.
2390 */
2391 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2392 wr32(E1000_TIMINCA,
2393 (1 << E1000_TIMINCA_16NS_SHIFT) |
2394 (16 << IGB_82576_TSYNC_SHIFT));
2395
2396 /* Set registers so that rollover occurs soon to test this. */
2397 wr32(E1000_SYSTIML, 0x00000000);
2398 wr32(E1000_SYSTIMH, 0xFF800000);
2399 wrfl();
2400
2401 timecounter_init(&adapter->clock,
2402 &adapter->cycles,
2403 ktime_to_ns(ktime_get_real()));
2404 /*
2405 * Synchronize our NIC clock against system wall clock. NIC
2406 * time stamp reading requires ~3us per sample, each sample
2407 * was pretty stable even under load => only require 10
2408 * samples for each offset comparison.
2409 */
2410 memset(&adapter->compare, 0, sizeof(adapter->compare));
2411 adapter->compare.source = &adapter->clock;
2412 adapter->compare.target = ktime_get_real;
2413 adapter->compare.num_samples = 10;
2414 timecompare_update(&adapter->compare, 0);
2415 break;
2416 case e1000_82575:
2417 /* 82575 does not support timesync */
2418 default:
2419 break;
2420 }
2421
2422}
2423
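
For orientation, a small stand-alone sketch of the cycles-to-nanoseconds conversion the timecounter performs on top of the settings above. The formula mirrors the kernel's cyclecounter ((cycles * mult) >> shift); the shift value of 19 corresponds to IGB_82576_TSYNC_SHIFT as used in the 82576 branch, and the exact register semantics should be treated as assumptions.

#include <stdint.h>
#include <stdio.h>

/* same form as the kernel's (cycles * cc->mult) >> cc->shift conversion */
static uint64_t cyc2ns(uint64_t delta, uint32_t mult, uint32_t shift)
{
	return (delta * mult) >> shift;
}

int main(void)
{
	uint32_t shift = 19;                      /* IGB_82576_TSYNC_SHIFT (assumed value) */
	uint64_t delta = (uint64_t)16 << shift;   /* one TIMINCA increment per 16 ns tick */

	/* with mult = 1, one increment reads back as exactly 16 ns */
	printf("%llu ns\n", (unsigned long long)cyc2ns(delta, 1, shift));
	return 0;
}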
9d5c8243
AK
2424/**
2425 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2426 * @adapter: board private structure to initialize
2427 *
2428 * igb_sw_init initializes the Adapter private data structure.
2429 * Fields are initialized based on PCI device information and
2430 * OS network device settings (MTU size).
2431 **/
2432static int __devinit igb_sw_init(struct igb_adapter *adapter)
2433{
2434 struct e1000_hw *hw = &adapter->hw;
2435 struct net_device *netdev = adapter->netdev;
2436 struct pci_dev *pdev = adapter->pdev;
2437
2438 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2439
13fde97a 2440 /* set default ring sizes */
68fd9910
AD
2441 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2442 adapter->rx_ring_count = IGB_DEFAULT_RXD;
13fde97a
AD
2443
2444 /* set default ITR values */
4fc82adf
AD
2445 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2446 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2447
13fde97a
AD
2448 /* set default work limits */
2449 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2450
153285f9
AD
2451 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2452 VLAN_HLEN;
9d5c8243
AK
2453 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2454
81c2fc22
AD
2455 adapter->node = -1;
2456
12dcd86b 2457 spin_lock_init(&adapter->stats64_lock);
a6b623e0 2458#ifdef CONFIG_PCI_IOV
6b78bb1d
CW
2459 switch (hw->mac.type) {
2460 case e1000_82576:
2461 case e1000_i350:
9b082d73
SA
2462 if (max_vfs > 7) {
2463 dev_warn(&pdev->dev,
2464 "Maximum of 7 VFs per PF, using max\n");
2465 adapter->vfs_allocated_count = 7;
2466 } else
2467 adapter->vfs_allocated_count = max_vfs;
6b78bb1d
CW
2468 break;
2469 default:
2470 break;
2471 }
a6b623e0 2472#endif /* CONFIG_PCI_IOV */
a99955fc 2473 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
665c8c8e
WM
2474 /* i350 cannot do RSS and SR-IOV at the same time */
2475 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2476 adapter->rss_queues = 1;
a99955fc
AD
2477
2478 /*
2479 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
2480 * then we should combine the queues into a queue pair in order to
2481 * conserve interrupts due to limited supply
2482 */
2483 if ((adapter->rss_queues > 4) ||
2484 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
2485 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2486
a6b623e0 2487 /* This call may decrease the number of queues */
047e0030 2488 if (igb_init_interrupt_scheme(adapter)) {
9d5c8243
AK
2489 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2490 return -ENOMEM;
2491 }
2492
a6b623e0
AD
2493 igb_probe_vfs(adapter);
2494
9d5c8243
AK
2495 /* Explicitly disable IRQ since the NIC can be in any state. */
2496 igb_irq_disable(adapter);
2497
831ec0b4
CW
2498 if (hw->mac.type == e1000_i350)
2499 adapter->flags &= ~IGB_FLAG_DMAC;
2500
9d5c8243
AK
2501 set_bit(__IGB_DOWN, &adapter->state);
2502 return 0;
2503}
2504
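
A tiny sketch of the queue-pairing decision made in igb_sw_init() above, evaluated for an assumed 8-CPU host with no VFs; the rss_queues starting point is hypothetical, since on real hardware it is clamped by IGB_MAX_RX_QUEUES.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int rss_queues = 8;  /* assumed min(IGB_MAX_RX_QUEUES, num_online_cpus()) */
	unsigned int vfs_allocated_count = 0;

	/* same condition as the IGB_FLAG_QUEUE_PAIRS test above */
	bool queue_pairs = (rss_queues > 4) ||
			   ((rss_queues > 1) && (vfs_allocated_count > 6));

	printf("IGB_FLAG_QUEUE_PAIRS %s\n", queue_pairs ? "set" : "clear");
	return 0;
}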
2505/**
2506 * igb_open - Called when a network interface is made active
2507 * @netdev: network interface device structure
2508 *
2509 * Returns 0 on success, negative value on failure
2510 *
2511 * The open entry point is called when a network interface is made
2512 * active by the system (IFF_UP). At this point all resources needed
2513 * for transmit and receive operations are allocated, the interrupt
2514 * handler is registered with the OS, the watchdog timer is started,
2515 * and the stack is notified that the interface is ready.
2516 **/
2517static int igb_open(struct net_device *netdev)
2518{
2519 struct igb_adapter *adapter = netdev_priv(netdev);
2520 struct e1000_hw *hw = &adapter->hw;
2521 int err;
2522 int i;
2523
2524 /* disallow open during test */
2525 if (test_bit(__IGB_TESTING, &adapter->state))
2526 return -EBUSY;
2527
b168dfc5
JB
2528 netif_carrier_off(netdev);
2529
9d5c8243
AK
2530 /* allocate transmit descriptors */
2531 err = igb_setup_all_tx_resources(adapter);
2532 if (err)
2533 goto err_setup_tx;
2534
2535 /* allocate receive descriptors */
2536 err = igb_setup_all_rx_resources(adapter);
2537 if (err)
2538 goto err_setup_rx;
2539
88a268c1 2540 igb_power_up_link(adapter);
9d5c8243 2541
9d5c8243
AK
2542 /* before we allocate an interrupt, we must be ready to handle it.
2543 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 2544 * as soon as we call pci_request_irq, so we have to set up our
2545 * clean_rx handler before we do so. */
2546 igb_configure(adapter);
2547
2548 err = igb_request_irq(adapter);
2549 if (err)
2550 goto err_req_irq;
2551
2552 /* From here on the code is the same as igb_up() */
2553 clear_bit(__IGB_DOWN, &adapter->state);
2554
047e0030
AD
2555 for (i = 0; i < adapter->num_q_vectors; i++) {
2556 struct igb_q_vector *q_vector = adapter->q_vector[i];
2557 napi_enable(&q_vector->napi);
2558 }
9d5c8243
AK
2559
2560 /* Clear any pending interrupts. */
2561 rd32(E1000_ICR);
844290e5
PW
2562
2563 igb_irq_enable(adapter);
2564
d4960307
AD
2565 /* notify VFs that reset has been completed */
2566 if (adapter->vfs_allocated_count) {
2567 u32 reg_data = rd32(E1000_CTRL_EXT);
2568 reg_data |= E1000_CTRL_EXT_PFRSTD;
2569 wr32(E1000_CTRL_EXT, reg_data);
2570 }
2571
d55b53ff
JK
2572 netif_tx_start_all_queues(netdev);
2573
25568a53
AD
2574 /* start the watchdog. */
2575 hw->mac.get_link_status = 1;
2576 schedule_work(&adapter->watchdog_task);
9d5c8243
AK
2577
2578 return 0;
2579
2580err_req_irq:
2581 igb_release_hw_control(adapter);
88a268c1 2582 igb_power_down_link(adapter);
9d5c8243
AK
2583 igb_free_all_rx_resources(adapter);
2584err_setup_rx:
2585 igb_free_all_tx_resources(adapter);
2586err_setup_tx:
2587 igb_reset(adapter);
2588
2589 return err;
2590}
2591
2592/**
2593 * igb_close - Disables a network interface
2594 * @netdev: network interface device structure
2595 *
2596 * Returns 0, this is not allowed to fail
2597 *
2598 * The close entry point is called when an interface is de-activated
2599 * by the OS. The hardware is still under the driver's control, but
2600 * needs to be disabled. A global MAC reset is issued to stop the
2601 * hardware, and all transmit and receive resources are freed.
2602 **/
2603static int igb_close(struct net_device *netdev)
2604{
2605 struct igb_adapter *adapter = netdev_priv(netdev);
2606
2607 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2608 igb_down(adapter);
2609
2610 igb_free_irq(adapter);
2611
2612 igb_free_all_tx_resources(adapter);
2613 igb_free_all_rx_resources(adapter);
2614
9d5c8243
AK
2615 return 0;
2616}
2617
2618/**
2619 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
9d5c8243
AK
2620 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2621 *
2622 * Return 0 on success, negative on failure
2623 **/
80785298 2624int igb_setup_tx_resources(struct igb_ring *tx_ring)
9d5c8243 2625{
59d71989 2626 struct device *dev = tx_ring->dev;
81c2fc22 2627 int orig_node = dev_to_node(dev);
9d5c8243
AK
2628 int size;
2629
06034649 2630 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
81c2fc22
AD
2631 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
2632 if (!tx_ring->tx_buffer_info)
2633 tx_ring->tx_buffer_info = vzalloc(size);
06034649 2634 if (!tx_ring->tx_buffer_info)
9d5c8243 2635 goto err;
9d5c8243
AK
2636
2637 /* round up to nearest 4K */
85e8d004 2638 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
9d5c8243
AK
2639 tx_ring->size = ALIGN(tx_ring->size, 4096);
2640
81c2fc22 2641 set_dev_node(dev, tx_ring->numa_node);
59d71989
AD
2642 tx_ring->desc = dma_alloc_coherent(dev,
2643 tx_ring->size,
2644 &tx_ring->dma,
2645 GFP_KERNEL);
81c2fc22
AD
2646 set_dev_node(dev, orig_node);
2647 if (!tx_ring->desc)
2648 tx_ring->desc = dma_alloc_coherent(dev,
2649 tx_ring->size,
2650 &tx_ring->dma,
2651 GFP_KERNEL);
9d5c8243
AK
2652
2653 if (!tx_ring->desc)
2654 goto err;
2655
9d5c8243
AK
2656 tx_ring->next_to_use = 0;
2657 tx_ring->next_to_clean = 0;
81c2fc22 2658
9d5c8243
AK
2659 return 0;
2660
2661err:
06034649 2662 vfree(tx_ring->tx_buffer_info);
59d71989 2663 dev_err(dev,
9d5c8243
AK
2664 "Unable to allocate memory for the transmit descriptor ring\n");
2665 return -ENOMEM;
2666}
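
A one-off sketch of the descriptor-ring sizing above, assuming a 256-descriptor ring (the default count here is an assumption) and 16-byte advanced Tx descriptors; it simply shows the round-up-to-4K step.

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int count = 256;         /* assumed tx_ring->count */
	unsigned int size = count * 16;   /* 16 == assumed sizeof(union e1000_adv_tx_desc) */

	/* round up to the nearest 4 KB, as igb_setup_tx_resources() does */
	printf("%u descriptors -> %u bytes (raw %u)\n", count, ALIGN_UP(size, 4096), size);
	return 0;
}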
2667
2668/**
2669 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2670 * (Descriptors) for all queues
2671 * @adapter: board private structure
2672 *
2673 * Return 0 on success, negative on failure
2674 **/
2675static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2676{
439705e1 2677 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2678 int i, err = 0;
2679
2680 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 2681 err = igb_setup_tx_resources(adapter->tx_ring[i]);
9d5c8243 2682 if (err) {
439705e1 2683 dev_err(&pdev->dev,
9d5c8243
AK
2684 "Allocation for Tx Queue %u failed\n", i);
2685 for (i--; i >= 0; i--)
3025a446 2686 igb_free_tx_resources(adapter->tx_ring[i]);
9d5c8243
AK
2687 break;
2688 }
2689 }
2690
2691 return err;
2692}
2693
2694/**
85b430b4
AD
2695 * igb_setup_tctl - configure the transmit control registers
2696 * @adapter: Board private structure
9d5c8243 2697 **/
d7ee5b3a 2698void igb_setup_tctl(struct igb_adapter *adapter)
9d5c8243 2699{
9d5c8243
AK
2700 struct e1000_hw *hw = &adapter->hw;
2701 u32 tctl;
9d5c8243 2702
85b430b4
AD
2703 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2704 wr32(E1000_TXDCTL(0), 0);
9d5c8243
AK
2705
2706 /* Program the Transmit Control Register */
9d5c8243
AK
2707 tctl = rd32(E1000_TCTL);
2708 tctl &= ~E1000_TCTL_CT;
2709 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2710 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2711
2712 igb_config_collision_dist(hw);
2713
9d5c8243
AK
2714 /* Enable transmits */
2715 tctl |= E1000_TCTL_EN;
2716
2717 wr32(E1000_TCTL, tctl);
2718}
2719
85b430b4
AD
2720/**
2721 * igb_configure_tx_ring - Configure transmit ring after Reset
2722 * @adapter: board private structure
2723 * @ring: tx ring to configure
2724 *
2725 * Configure a transmit ring after a reset.
2726 **/
d7ee5b3a
AD
2727void igb_configure_tx_ring(struct igb_adapter *adapter,
2728 struct igb_ring *ring)
85b430b4
AD
2729{
2730 struct e1000_hw *hw = &adapter->hw;
a74420e0 2731 u32 txdctl = 0;
85b430b4
AD
2732 u64 tdba = ring->dma;
2733 int reg_idx = ring->reg_idx;
2734
2735 /* disable the queue */
a74420e0 2736 wr32(E1000_TXDCTL(reg_idx), 0);
85b430b4
AD
2737 wrfl();
2738 mdelay(10);
2739
2740 wr32(E1000_TDLEN(reg_idx),
2741 ring->count * sizeof(union e1000_adv_tx_desc));
2742 wr32(E1000_TDBAL(reg_idx),
2743 tdba & 0x00000000ffffffffULL);
2744 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2745
fce99e34 2746 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
a74420e0 2747 wr32(E1000_TDH(reg_idx), 0);
fce99e34 2748 writel(0, ring->tail);
85b430b4
AD
2749
2750 txdctl |= IGB_TX_PTHRESH;
2751 txdctl |= IGB_TX_HTHRESH << 8;
2752 txdctl |= IGB_TX_WTHRESH << 16;
2753
2754 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2755 wr32(E1000_TXDCTL(reg_idx), txdctl);
2756}
2757
2758/**
2759 * igb_configure_tx - Configure transmit Unit after Reset
2760 * @adapter: board private structure
2761 *
2762 * Configure the Tx unit of the MAC after a reset.
2763 **/
2764static void igb_configure_tx(struct igb_adapter *adapter)
2765{
2766 int i;
2767
2768 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 2769 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
85b430b4
AD
2770}
2771
9d5c8243
AK
2772/**
2773 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
9d5c8243
AK
2774 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2775 *
2776 * Returns 0 on success, negative on failure
2777 **/
80785298 2778int igb_setup_rx_resources(struct igb_ring *rx_ring)
9d5c8243 2779{
59d71989 2780 struct device *dev = rx_ring->dev;
81c2fc22 2781 int orig_node = dev_to_node(dev);
9d5c8243
AK
2782 int size, desc_len;
2783
06034649 2784 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
81c2fc22
AD
2785 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
2786 if (!rx_ring->rx_buffer_info)
2787 rx_ring->rx_buffer_info = vzalloc(size);
06034649 2788 if (!rx_ring->rx_buffer_info)
9d5c8243 2789 goto err;
9d5c8243
AK
2790
2791 desc_len = sizeof(union e1000_adv_rx_desc);
2792
2793 /* Round up to nearest 4K */
2794 rx_ring->size = rx_ring->count * desc_len;
2795 rx_ring->size = ALIGN(rx_ring->size, 4096);
2796
81c2fc22 2797 set_dev_node(dev, rx_ring->numa_node);
59d71989
AD
2798 rx_ring->desc = dma_alloc_coherent(dev,
2799 rx_ring->size,
2800 &rx_ring->dma,
2801 GFP_KERNEL);
81c2fc22
AD
2802 set_dev_node(dev, orig_node);
2803 if (!rx_ring->desc)
2804 rx_ring->desc = dma_alloc_coherent(dev,
2805 rx_ring->size,
2806 &rx_ring->dma,
2807 GFP_KERNEL);
9d5c8243
AK
2808
2809 if (!rx_ring->desc)
2810 goto err;
2811
2812 rx_ring->next_to_clean = 0;
2813 rx_ring->next_to_use = 0;
9d5c8243 2814
9d5c8243
AK
2815 return 0;
2816
2817err:
06034649
AD
2818 vfree(rx_ring->rx_buffer_info);
2819 rx_ring->rx_buffer_info = NULL;
59d71989
AD
2820 dev_err(dev, "Unable to allocate memory for the receive descriptor"
2821 " ring\n");
9d5c8243
AK
2822 return -ENOMEM;
2823}
2824
2825/**
2826 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2827 * (Descriptors) for all queues
2828 * @adapter: board private structure
2829 *
2830 * Return 0 on success, negative on failure
2831 **/
2832static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2833{
439705e1 2834 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2835 int i, err = 0;
2836
2837 for (i = 0; i < adapter->num_rx_queues; i++) {
3025a446 2838 err = igb_setup_rx_resources(adapter->rx_ring[i]);
9d5c8243 2839 if (err) {
439705e1 2840 dev_err(&pdev->dev,
9d5c8243
AK
2841 "Allocation for Rx Queue %u failed\n", i);
2842 for (i--; i >= 0; i--)
3025a446 2843 igb_free_rx_resources(adapter->rx_ring[i]);
9d5c8243
AK
2844 break;
2845 }
2846 }
2847
2848 return err;
2849}
2850
06cf2666
AD
2851/**
2852 * igb_setup_mrqc - configure the multiple receive queue control registers
2853 * @adapter: Board private structure
2854 **/
2855static void igb_setup_mrqc(struct igb_adapter *adapter)
2856{
2857 struct e1000_hw *hw = &adapter->hw;
2858 u32 mrqc, rxcsum;
2859 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2860 union e1000_reta {
2861 u32 dword;
2862 u8 bytes[4];
2863 } reta;
2864 static const u8 rsshash[40] = {
2865 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2866 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2867 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2868 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2869
2870 /* Fill out hash function seeds */
2871 for (j = 0; j < 10; j++) {
2872 u32 rsskey = rsshash[(j * 4)];
2873 rsskey |= rsshash[(j * 4) + 1] << 8;
2874 rsskey |= rsshash[(j * 4) + 2] << 16;
2875 rsskey |= rsshash[(j * 4) + 3] << 24;
2876 array_wr32(E1000_RSSRK(0), j, rsskey);
2877 }
2878
a99955fc 2879 num_rx_queues = adapter->rss_queues;
06cf2666
AD
2880
2881 if (adapter->vfs_allocated_count) {
2882 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2883 switch (hw->mac.type) {
d2ba2ed8 2884 case e1000_i350:
55cac248
AD
2885 case e1000_82580:
2886 num_rx_queues = 1;
2887 shift = 0;
2888 break;
06cf2666
AD
2889 case e1000_82576:
2890 shift = 3;
2891 num_rx_queues = 2;
2892 break;
2893 case e1000_82575:
2894 shift = 2;
2895 shift2 = 6;
2896 default:
2897 break;
2898 }
2899 } else {
2900 if (hw->mac.type == e1000_82575)
2901 shift = 6;
2902 }
2903
2904 for (j = 0; j < (32 * 4); j++) {
2905 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2906 if (shift2)
2907 reta.bytes[j & 3] |= num_rx_queues << shift2;
2908 if ((j & 3) == 3)
2909 wr32(E1000_RETA(j >> 2), reta.dword);
2910 }
2911
2912 /*
2913 * Disable raw packet checksumming so that RSS hash is placed in
2914 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2915 * offloads as they are enabled by default
2916 */
2917 rxcsum = rd32(E1000_RXCSUM);
2918 rxcsum |= E1000_RXCSUM_PCSD;
2919
2920 if (adapter->hw.mac.type >= e1000_82576)
2921 /* Enable Receive Checksum Offload for SCTP */
2922 rxcsum |= E1000_RXCSUM_CRCOFL;
2923
2924 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2925 wr32(E1000_RXCSUM, rxcsum);
2926
2927 /* If VMDq is enabled then we set the appropriate mode for that, else
2928 * we default to RSS so that an RSS hash is calculated per packet even
2929 * if we are only using one queue */
2930 if (adapter->vfs_allocated_count) {
2931 if (hw->mac.type > e1000_82575) {
2932 /* Set the default pool for the PF's first queue */
2933 u32 vtctl = rd32(E1000_VT_CTL);
2934 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2935 E1000_VT_CTL_DISABLE_DEF_POOL);
2936 vtctl |= adapter->vfs_allocated_count <<
2937 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2938 wr32(E1000_VT_CTL, vtctl);
2939 }
a99955fc 2940 if (adapter->rss_queues > 1)
06cf2666
AD
2941 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2942 else
2943 mrqc = E1000_MRQC_ENABLE_VMDQ;
2944 } else {
2945 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2946 }
2947 igb_vmm_control(adapter);
2948
4478a9cd
AD
2949 /*
2950 * Generate RSS hash based on TCP port numbers and/or
2951 * IPv4/v6 src and dst addresses since UDP cannot be
2952 * hashed reliably due to IP fragmentation
2953 */
2954 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2955 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2956 E1000_MRQC_RSS_FIELD_IPV6 |
2957 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2958 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
06cf2666
AD
2959
2960 wr32(E1000_MRQC, mrqc);
2961}
2962
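
To make the indirection-table packing above concrete, here is a small sketch that fills the 128-entry RETA the same way, assuming 4 RSS queues, no VFs and shift == 0; the printed dword values are as seen on a little-endian host.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int num_rx_queues = 4, shift = 0, j;
	union { uint32_t dword; uint8_t bytes[4]; } reta;

	for (j = 0; j < 32 * 4; j++) {
		reta.bytes[j & 3] = (j % num_rx_queues) << shift;
		if ((j & 3) == 3)   /* one 32-bit RETA register per four entries */
			printf("RETA[%2u] = 0x%08x\n", j >> 2, reta.dword);
	}
	return 0;
}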
9d5c8243
AK
2963/**
2964 * igb_setup_rctl - configure the receive control registers
2965 * @adapter: Board private structure
2966 **/
d7ee5b3a 2967void igb_setup_rctl(struct igb_adapter *adapter)
9d5c8243
AK
2968{
2969 struct e1000_hw *hw = &adapter->hw;
2970 u32 rctl;
9d5c8243
AK
2971
2972 rctl = rd32(E1000_RCTL);
2973
2974 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
69d728ba 2975 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
9d5c8243 2976
69d728ba 2977 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
28b0759c 2978 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
9d5c8243 2979
87cb7e8c
AK
2980 /*
2981 * enable stripping of CRC. It's unlikely this will break BMC
2982 * redirection as it did with e1000. Newer features require
2983 * that the HW strips the CRC.
73cd78f1 2984 */
87cb7e8c 2985 rctl |= E1000_RCTL_SECRC;
9d5c8243 2986
559e9c49 2987 /* disable store bad packets and clear size bits. */
ec54d7d6 2988 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
9d5c8243 2989
6ec43fe6
AD
2990 /* enable LPE to prevent packets larger than max_frame_size */
2991 rctl |= E1000_RCTL_LPE;
9d5c8243 2992
952f72a8
AD
2993 /* disable queue 0 to prevent tail write w/o re-config */
2994 wr32(E1000_RXDCTL(0), 0);
9d5c8243 2995
e1739522
AD
2996 /* Attention!!! For SR-IOV PF driver operations you must enable
2997 * queue drop for all VF and PF queues to prevent head of line blocking
2998 * if an un-trusted VF does not provide descriptors to hardware.
2999 */
3000 if (adapter->vfs_allocated_count) {
e1739522
AD
3001 /* set all queue drop enable bits */
3002 wr32(E1000_QDE, ALL_QUEUES);
e1739522
AD
3003 }
3004
9d5c8243
AK
3005 wr32(E1000_RCTL, rctl);
3006}
3007
7d5753f0
AD
3008static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3009 int vfn)
3010{
3011 struct e1000_hw *hw = &adapter->hw;
3012 u32 vmolr;
3013
 3014 /* if it isn't the PF, check to see if VFs are enabled and
3015 * increase the size to support vlan tags */
3016 if (vfn < adapter->vfs_allocated_count &&
3017 adapter->vf_data[vfn].vlans_enabled)
3018 size += VLAN_TAG_SIZE;
3019
3020 vmolr = rd32(E1000_VMOLR(vfn));
3021 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3022 vmolr |= size | E1000_VMOLR_LPE;
3023 wr32(E1000_VMOLR(vfn), vmolr);
3024
3025 return 0;
3026}
3027
e1739522
AD
3028/**
3029 * igb_rlpml_set - set maximum receive packet size
3030 * @adapter: board private structure
3031 *
3032 * Configure maximum receivable packet size.
3033 **/
3034static void igb_rlpml_set(struct igb_adapter *adapter)
3035{
153285f9 3036 u32 max_frame_size = adapter->max_frame_size;
e1739522
AD
3037 struct e1000_hw *hw = &adapter->hw;
3038 u16 pf_id = adapter->vfs_allocated_count;
3039
e1739522
AD
3040 if (pf_id) {
3041 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
153285f9
AD
3042 /*
3043 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3044 * to our max jumbo frame size, in case we need to enable
3045 * jumbo frames on one of the rings later.
3046 * This will not pass over-length frames into the default
3047 * queue because it's gated by the VMOLR.RLPML.
3048 */
7d5753f0 3049 max_frame_size = MAX_JUMBO_FRAME_SIZE;
e1739522
AD
3050 }
3051
3052 wr32(E1000_RLPML, max_frame_size);
3053}
3054
8151d294
WM
3055static inline void igb_set_vmolr(struct igb_adapter *adapter,
3056 int vfn, bool aupe)
7d5753f0
AD
3057{
3058 struct e1000_hw *hw = &adapter->hw;
3059 u32 vmolr;
3060
3061 /*
3062 * This register exists only on 82576 and newer so if we are older then
3063 * we should exit and do nothing
3064 */
3065 if (hw->mac.type < e1000_82576)
3066 return;
3067
3068 vmolr = rd32(E1000_VMOLR(vfn));
8151d294
WM
3069 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3070 if (aupe)
3071 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3072 else
3073 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
7d5753f0
AD
3074
3075 /* clear all bits that might not be set */
3076 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3077
a99955fc 3078 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
7d5753f0
AD
3079 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3080 /*
3081 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3082 * multicast packets
3083 */
3084 if (vfn <= adapter->vfs_allocated_count)
3085 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3086
3087 wr32(E1000_VMOLR(vfn), vmolr);
3088}
3089
85b430b4
AD
3090/**
3091 * igb_configure_rx_ring - Configure a receive ring after Reset
3092 * @adapter: board private structure
3093 * @ring: receive ring to be configured
3094 *
3095 * Configure the Rx unit of the MAC after a reset.
3096 **/
d7ee5b3a
AD
3097void igb_configure_rx_ring(struct igb_adapter *adapter,
3098 struct igb_ring *ring)
85b430b4
AD
3099{
3100 struct e1000_hw *hw = &adapter->hw;
3101 u64 rdba = ring->dma;
3102 int reg_idx = ring->reg_idx;
a74420e0 3103 u32 srrctl = 0, rxdctl = 0;
85b430b4
AD
3104
3105 /* disable the queue */
a74420e0 3106 wr32(E1000_RXDCTL(reg_idx), 0);
85b430b4
AD
3107
3108 /* Set DMA base address registers */
3109 wr32(E1000_RDBAL(reg_idx),
3110 rdba & 0x00000000ffffffffULL);
3111 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3112 wr32(E1000_RDLEN(reg_idx),
3113 ring->count * sizeof(union e1000_adv_rx_desc));
3114
3115 /* initialize head and tail */
fce99e34 3116 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
a74420e0 3117 wr32(E1000_RDH(reg_idx), 0);
fce99e34 3118 writel(0, ring->tail);
85b430b4 3119
952f72a8 3120 /* set descriptor configuration */
44390ca6 3121 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
952f72a8 3122#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
44390ca6 3123 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
952f72a8 3124#else
44390ca6 3125 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
952f72a8 3126#endif
44390ca6 3127 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
757b77e2
NN
3128 if (hw->mac.type == e1000_82580)
3129 srrctl |= E1000_SRRCTL_TIMESTAMP;
e6bdb6fe
NN
3130 /* Only set Drop Enable if we are supporting multiple queues */
3131 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3132 srrctl |= E1000_SRRCTL_DROP_EN;
952f72a8
AD
3133
3134 wr32(E1000_SRRCTL(reg_idx), srrctl);
3135
7d5753f0 3136 /* set filtering for VMDQ pools */
8151d294 3137 igb_set_vmolr(adapter, reg_idx & 0x7, true);
7d5753f0 3138
85b430b4
AD
3139 rxdctl |= IGB_RX_PTHRESH;
3140 rxdctl |= IGB_RX_HTHRESH << 8;
3141 rxdctl |= IGB_RX_WTHRESH << 16;
a74420e0
AD
3142
3143 /* enable receive descriptor fetching */
3144 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
85b430b4
AD
3145 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3146}
3147
9d5c8243
AK
3148/**
3149 * igb_configure_rx - Configure receive Unit after Reset
3150 * @adapter: board private structure
3151 *
3152 * Configure the Rx unit of the MAC after a reset.
3153 **/
3154static void igb_configure_rx(struct igb_adapter *adapter)
3155{
9107584e 3156 int i;
9d5c8243 3157
68d480c4
AD
3158 /* set UTA to appropriate mode */
3159 igb_set_uta(adapter);
3160
26ad9178
AD
3161 /* set the correct pool for the PF default MAC address in entry 0 */
3162 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3163 adapter->vfs_allocated_count);
3164
06cf2666
AD
3165 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3166 * the Base and Length of the Rx Descriptor Ring */
3167 for (i = 0; i < adapter->num_rx_queues; i++)
3025a446 3168 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
9d5c8243
AK
3169}
3170
3171/**
3172 * igb_free_tx_resources - Free Tx Resources per Queue
9d5c8243
AK
3173 * @tx_ring: Tx descriptor ring for a specific queue
3174 *
3175 * Free all transmit software resources
3176 **/
68fd9910 3177void igb_free_tx_resources(struct igb_ring *tx_ring)
9d5c8243 3178{
3b644cf6 3179 igb_clean_tx_ring(tx_ring);
9d5c8243 3180
06034649
AD
3181 vfree(tx_ring->tx_buffer_info);
3182 tx_ring->tx_buffer_info = NULL;
9d5c8243 3183
439705e1
AD
3184 /* if not set, then don't free */
3185 if (!tx_ring->desc)
3186 return;
3187
59d71989
AD
3188 dma_free_coherent(tx_ring->dev, tx_ring->size,
3189 tx_ring->desc, tx_ring->dma);
9d5c8243
AK
3190
3191 tx_ring->desc = NULL;
3192}
3193
3194/**
3195 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3196 * @adapter: board private structure
3197 *
3198 * Free all transmit software resources
3199 **/
3200static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3201{
3202 int i;
3203
3204 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 3205 igb_free_tx_resources(adapter->tx_ring[i]);
9d5c8243
AK
3206}
3207
ebe42d16
AD
3208void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3209 struct igb_tx_buffer *tx_buffer)
3210{
3211 if (tx_buffer->skb) {
3212 dev_kfree_skb_any(tx_buffer->skb);
3213 if (tx_buffer->dma)
3214 dma_unmap_single(ring->dev,
3215 tx_buffer->dma,
3216 tx_buffer->length,
3217 DMA_TO_DEVICE);
3218 } else if (tx_buffer->dma) {
3219 dma_unmap_page(ring->dev,
3220 tx_buffer->dma,
3221 tx_buffer->length,
3222 DMA_TO_DEVICE);
3223 }
3224 tx_buffer->next_to_watch = NULL;
3225 tx_buffer->skb = NULL;
3226 tx_buffer->dma = 0;
3227 /* buffer_info must be completely set up in the transmit path */
9d5c8243
AK
3228}
3229
3230/**
3231 * igb_clean_tx_ring - Free Tx Buffers
9d5c8243
AK
3232 * @tx_ring: ring to be cleaned
3233 **/
3b644cf6 3234static void igb_clean_tx_ring(struct igb_ring *tx_ring)
9d5c8243 3235{
06034649 3236 struct igb_tx_buffer *buffer_info;
9d5c8243 3237 unsigned long size;
6ad4edfc 3238 u16 i;
9d5c8243 3239
06034649 3240 if (!tx_ring->tx_buffer_info)
9d5c8243
AK
3241 return;
3242 /* Free all the Tx ring sk_buffs */
3243
3244 for (i = 0; i < tx_ring->count; i++) {
06034649 3245 buffer_info = &tx_ring->tx_buffer_info[i];
80785298 3246 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
9d5c8243
AK
3247 }
3248
06034649
AD
3249 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3250 memset(tx_ring->tx_buffer_info, 0, size);
9d5c8243
AK
3251
3252 /* Zero out the descriptor ring */
9d5c8243
AK
3253 memset(tx_ring->desc, 0, tx_ring->size);
3254
3255 tx_ring->next_to_use = 0;
3256 tx_ring->next_to_clean = 0;
9d5c8243
AK
3257}
3258
3259/**
3260 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3261 * @adapter: board private structure
3262 **/
3263static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3264{
3265 int i;
3266
3267 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 3268 igb_clean_tx_ring(adapter->tx_ring[i]);
9d5c8243
AK
3269}
3270
3271/**
3272 * igb_free_rx_resources - Free Rx Resources
9d5c8243
AK
3273 * @rx_ring: ring to clean the resources from
3274 *
3275 * Free all receive software resources
3276 **/
68fd9910 3277void igb_free_rx_resources(struct igb_ring *rx_ring)
9d5c8243 3278{
3b644cf6 3279 igb_clean_rx_ring(rx_ring);
9d5c8243 3280
06034649
AD
3281 vfree(rx_ring->rx_buffer_info);
3282 rx_ring->rx_buffer_info = NULL;
9d5c8243 3283
439705e1
AD
3284 /* if not set, then don't free */
3285 if (!rx_ring->desc)
3286 return;
3287
59d71989
AD
3288 dma_free_coherent(rx_ring->dev, rx_ring->size,
3289 rx_ring->desc, rx_ring->dma);
9d5c8243
AK
3290
3291 rx_ring->desc = NULL;
3292}
3293
3294/**
3295 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3296 * @adapter: board private structure
3297 *
3298 * Free all receive software resources
3299 **/
3300static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3301{
3302 int i;
3303
3304 for (i = 0; i < adapter->num_rx_queues; i++)
3025a446 3305 igb_free_rx_resources(adapter->rx_ring[i]);
9d5c8243
AK
3306}
3307
3308/**
3309 * igb_clean_rx_ring - Free Rx Buffers per Queue
9d5c8243
AK
3310 * @rx_ring: ring to free buffers from
3311 **/
3b644cf6 3312static void igb_clean_rx_ring(struct igb_ring *rx_ring)
9d5c8243 3313{
9d5c8243 3314 unsigned long size;
c023cd88 3315 u16 i;
9d5c8243 3316
06034649 3317 if (!rx_ring->rx_buffer_info)
9d5c8243 3318 return;
439705e1 3319
9d5c8243
AK
3320 /* Free all the Rx ring sk_buffs */
3321 for (i = 0; i < rx_ring->count; i++) {
06034649 3322 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
9d5c8243 3323 if (buffer_info->dma) {
59d71989 3324 dma_unmap_single(rx_ring->dev,
80785298 3325 buffer_info->dma,
44390ca6 3326 IGB_RX_HDR_LEN,
59d71989 3327 DMA_FROM_DEVICE);
9d5c8243
AK
3328 buffer_info->dma = 0;
3329 }
3330
3331 if (buffer_info->skb) {
3332 dev_kfree_skb(buffer_info->skb);
3333 buffer_info->skb = NULL;
3334 }
6ec43fe6 3335 if (buffer_info->page_dma) {
59d71989 3336 dma_unmap_page(rx_ring->dev,
80785298 3337 buffer_info->page_dma,
6ec43fe6 3338 PAGE_SIZE / 2,
59d71989 3339 DMA_FROM_DEVICE);
6ec43fe6
AD
3340 buffer_info->page_dma = 0;
3341 }
9d5c8243 3342 if (buffer_info->page) {
9d5c8243
AK
3343 put_page(buffer_info->page);
3344 buffer_info->page = NULL;
bf36c1a0 3345 buffer_info->page_offset = 0;
9d5c8243
AK
3346 }
3347 }
3348
06034649
AD
3349 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3350 memset(rx_ring->rx_buffer_info, 0, size);
9d5c8243
AK
3351
3352 /* Zero out the descriptor ring */
3353 memset(rx_ring->desc, 0, rx_ring->size);
3354
3355 rx_ring->next_to_clean = 0;
3356 rx_ring->next_to_use = 0;
9d5c8243
AK
3357}
3358
3359/**
3360 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3361 * @adapter: board private structure
3362 **/
3363static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3364{
3365 int i;
3366
3367 for (i = 0; i < adapter->num_rx_queues; i++)
3025a446 3368 igb_clean_rx_ring(adapter->rx_ring[i]);
9d5c8243
AK
3369}
3370
3371/**
3372 * igb_set_mac - Change the Ethernet Address of the NIC
3373 * @netdev: network interface device structure
3374 * @p: pointer to an address structure
3375 *
3376 * Returns 0 on success, negative on failure
3377 **/
3378static int igb_set_mac(struct net_device *netdev, void *p)
3379{
3380 struct igb_adapter *adapter = netdev_priv(netdev);
28b0759c 3381 struct e1000_hw *hw = &adapter->hw;
9d5c8243
AK
3382 struct sockaddr *addr = p;
3383
3384 if (!is_valid_ether_addr(addr->sa_data))
3385 return -EADDRNOTAVAIL;
3386
3387 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
28b0759c 3388 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
9d5c8243 3389
26ad9178
AD
3390 /* set the correct pool for the new PF MAC address in entry 0 */
3391 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3392 adapter->vfs_allocated_count);
e1739522 3393
9d5c8243
AK
3394 return 0;
3395}
3396
3397/**
68d480c4 3398 * igb_write_mc_addr_list - write multicast addresses to MTA
9d5c8243
AK
3399 * @netdev: network interface device structure
3400 *
68d480c4
AD
3401 * Writes multicast address list to the MTA hash table.
3402 * Returns: -ENOMEM on failure
3403 * 0 on no addresses written
3404 * X on writing X addresses to MTA
9d5c8243 3405 **/
68d480c4 3406static int igb_write_mc_addr_list(struct net_device *netdev)
9d5c8243
AK
3407{
3408 struct igb_adapter *adapter = netdev_priv(netdev);
3409 struct e1000_hw *hw = &adapter->hw;
22bedad3 3410 struct netdev_hw_addr *ha;
68d480c4 3411 u8 *mta_list;
9d5c8243
AK
3412 int i;
3413
4cd24eaf 3414 if (netdev_mc_empty(netdev)) {
68d480c4
AD
3415 /* nothing to program, so clear mc list */
3416 igb_update_mc_addr_list(hw, NULL, 0);
3417 igb_restore_vf_multicasts(adapter);
3418 return 0;
3419 }
9d5c8243 3420
4cd24eaf 3421 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
68d480c4
AD
3422 if (!mta_list)
3423 return -ENOMEM;
ff41f8dc 3424
68d480c4 3425 /* The shared function expects a packed array of only addresses. */
48e2f183 3426 i = 0;
22bedad3
JP
3427 netdev_for_each_mc_addr(ha, netdev)
3428 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
68d480c4 3429
68d480c4
AD
3430 igb_update_mc_addr_list(hw, mta_list, i);
3431 kfree(mta_list);
3432
4cd24eaf 3433 return netdev_mc_count(netdev);
68d480c4
AD
3434}
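/* mta_list above is a packed array of 6-byte (ETH_ALEN) hardware
 * addresses: entry i lives at offset i * ETH_ALEN, which is why the
 * buffer is sized netdev_mc_count(netdev) * 6 before being handed to
 * igb_update_mc_addr_list(). */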
3435
3436/**
3437 * igb_write_uc_addr_list - write unicast addresses to RAR table
3438 * @netdev: network interface device structure
3439 *
3440 * Writes unicast address list to the RAR table.
3441 * Returns: -ENOMEM on failure/insufficient address space
3442 * 0 on no addresses written
3443 * X on writing X addresses to the RAR table
3444 **/
3445static int igb_write_uc_addr_list(struct net_device *netdev)
3446{
3447 struct igb_adapter *adapter = netdev_priv(netdev);
3448 struct e1000_hw *hw = &adapter->hw;
3449 unsigned int vfn = adapter->vfs_allocated_count;
3450 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3451 int count = 0;
3452
3453 /* return ENOMEM indicating insufficient memory for addresses */
32e7bfc4 3454 if (netdev_uc_count(netdev) > rar_entries)
68d480c4 3455 return -ENOMEM;
9d5c8243 3456
32e7bfc4 3457 if (!netdev_uc_empty(netdev) && rar_entries) {
ff41f8dc 3458 struct netdev_hw_addr *ha;
32e7bfc4
JP
3459
3460 netdev_for_each_uc_addr(ha, netdev) {
ff41f8dc
AD
3461 if (!rar_entries)
3462 break;
26ad9178
AD
3463 igb_rar_set_qsel(adapter, ha->addr,
3464 rar_entries--,
68d480c4
AD
3465 vfn);
3466 count++;
ff41f8dc
AD
3467 }
3468 }
3469 /* write the addresses in reverse order to avoid write combining */
3470 for (; rar_entries > 0 ; rar_entries--) {
3471 wr32(E1000_RAH(rar_entries), 0);
3472 wr32(E1000_RAL(rar_entries), 0);
3473 }
3474 wrfl();
3475
68d480c4
AD
3476 return count;
3477}
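/* The rar_entries budget above is hw->mac.rar_entry_count minus
 * (vfs_allocated_count + 1): entry 0 carries the PF MAC address (set in
 * igb_set_mac()) and one entry is reserved per VF, so additional unicast
 * filters are written from the top of the RAR table downward. */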
3478
3479/**
3480 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3481 * @netdev: network interface device structure
3482 *
3483 * The set_rx_mode entry point is called whenever the unicast or multicast
3484 * address lists or the network interface flags are updated. This routine is
3485 * responsible for configuring the hardware for proper unicast, multicast,
3486 * promiscuous mode, and all-multi behavior.
3487 **/
3488static void igb_set_rx_mode(struct net_device *netdev)
3489{
3490 struct igb_adapter *adapter = netdev_priv(netdev);
3491 struct e1000_hw *hw = &adapter->hw;
3492 unsigned int vfn = adapter->vfs_allocated_count;
3493 u32 rctl, vmolr = 0;
3494 int count;
3495
3496 /* Check for Promiscuous and All Multicast modes */
3497 rctl = rd32(E1000_RCTL);
3498
3499 /* clear the affected bits */
3500 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3501
3502 if (netdev->flags & IFF_PROMISC) {
3503 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3504 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
3505 } else {
3506 if (netdev->flags & IFF_ALLMULTI) {
3507 rctl |= E1000_RCTL_MPE;
3508 vmolr |= E1000_VMOLR_MPME;
3509 } else {
3510 /*
3511 * Write addresses to the MTA; if the attempt fails,
25985edc 3512 * then we should just turn on promiscuous mode so
68d480c4
AD
3513 * that we can at least receive multicast traffic
3514 */
3515 count = igb_write_mc_addr_list(netdev);
3516 if (count < 0) {
3517 rctl |= E1000_RCTL_MPE;
3518 vmolr |= E1000_VMOLR_MPME;
3519 } else if (count) {
3520 vmolr |= E1000_VMOLR_ROMPE;
3521 }
3522 }
3523 /*
3524 * Write addresses to available RAR registers; if there is not
3525 * sufficient space to store all the addresses then enable
25985edc 3526 * unicast promiscuous mode
68d480c4
AD
3527 */
3528 count = igb_write_uc_addr_list(netdev);
3529 if (count < 0) {
3530 rctl |= E1000_RCTL_UPE;
3531 vmolr |= E1000_VMOLR_ROPE;
3532 }
3533 rctl |= E1000_RCTL_VFE;
28fc06f5 3534 }
68d480c4 3535 wr32(E1000_RCTL, rctl);
28fc06f5 3536
68d480c4
AD
3537 /*
3538 * In order to support SR-IOV and eventually VMDq it is necessary to set
3539 * the VMOLR to enable the appropriate modes. Without this workaround
3540 * we will have issues with VLAN tag stripping not being done for frames
3541 * that are only arriving because we are the default pool
3542 */
3543 if (hw->mac.type < e1000_82576)
28fc06f5 3544 return;
9d5c8243 3545
68d480c4
AD
3546 vmolr |= rd32(E1000_VMOLR(vfn)) &
3547 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3548 wr32(E1000_VMOLR(vfn), vmolr);
28fc06f5 3549 igb_restore_vf_multicasts(adapter);
9d5c8243
AK
3550}
3551
13800469
GR
3552static void igb_check_wvbr(struct igb_adapter *adapter)
3553{
3554 struct e1000_hw *hw = &adapter->hw;
3555 u32 wvbr = 0;
3556
3557 switch (hw->mac.type) {
3558 case e1000_82576:
3559 case e1000_i350:
3560 if (!(wvbr = rd32(E1000_WVBR)))
3561 return;
3562 break;
3563 default:
3564 break;
3565 }
3566
3567 adapter->wvbr |= wvbr;
3568}
3569
3570#define IGB_STAGGERED_QUEUE_OFFSET 8
3571
3572static void igb_spoof_check(struct igb_adapter *adapter)
3573{
3574 int j;
3575
3576 if (!adapter->wvbr)
3577 return;
3578
3579 for (j = 0; j < adapter->vfs_allocated_count; j++) {
3580 if (adapter->wvbr & (1 << j) ||
3581 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3582 dev_warn(&adapter->pdev->dev,
3583 "Spoof event(s) detected on VF %d\n", j);
3584 adapter->wvbr &=
3585 ~((1 << j) |
3586 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3587 }
3588 }
3589}
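/* The WVBR register reports wrong-VM-behavior (spoof) events in two
 * banks: bit j and bit (j + IGB_STAGGERED_QUEUE_OFFSET) both map to
 * VF j, which is why igb_spoof_check() tests and clears both bits. */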
3590
9d5c8243
AK
3591/* Need to wait a few seconds after link up to get diagnostic information from
3592 * the phy */
3593static void igb_update_phy_info(unsigned long data)
3594{
3595 struct igb_adapter *adapter = (struct igb_adapter *) data;
f5f4cf08 3596 igb_get_phy_info(&adapter->hw);
9d5c8243
AK
3597}
3598
4d6b725e
AD
3599/**
3600 * igb_has_link - check shared code for link and determine up/down
3601 * @adapter: pointer to driver private info
3602 **/
3145535a 3603bool igb_has_link(struct igb_adapter *adapter)
4d6b725e
AD
3604{
3605 struct e1000_hw *hw = &adapter->hw;
3606 bool link_active = false;
3607 s32 ret_val = 0;
3608
3609 /* get_link_status is set on LSC (link status) interrupt or
3610 * rx sequence error interrupt. get_link_status will stay
3611 * true until the e1000_check_for_link establishes link
3612 * for copper adapters ONLY
3613 */
3614 switch (hw->phy.media_type) {
3615 case e1000_media_type_copper:
3616 if (hw->mac.get_link_status) {
3617 ret_val = hw->mac.ops.check_for_link(hw);
3618 link_active = !hw->mac.get_link_status;
3619 } else {
3620 link_active = true;
3621 }
3622 break;
4d6b725e
AD
3623 case e1000_media_type_internal_serdes:
3624 ret_val = hw->mac.ops.check_for_link(hw);
3625 link_active = hw->mac.serdes_has_link;
3626 break;
3627 default:
3628 case e1000_media_type_unknown:
3629 break;
3630 }
3631
3632 return link_active;
3633}
3634
563988dc
SA
3635static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3636{
3637 bool ret = false;
3638 u32 ctrl_ext, thstat;
3639
3640 /* check for thermal sensor event on i350, copper only */
3641 if (hw->mac.type == e1000_i350) {
3642 thstat = rd32(E1000_THSTAT);
3643 ctrl_ext = rd32(E1000_CTRL_EXT);
3644
3645 if ((hw->phy.media_type == e1000_media_type_copper) &&
3646 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3647 ret = !!(thstat & event);
3648 }
3649 }
3650
3651 return ret;
3652}
3653
9d5c8243
AK
3654/**
3655 * igb_watchdog - Timer Call-back
3656 * @data: pointer to adapter cast into an unsigned long
3657 **/
3658static void igb_watchdog(unsigned long data)
3659{
3660 struct igb_adapter *adapter = (struct igb_adapter *)data;
3661 /* Do the rest outside of interrupt context */
3662 schedule_work(&adapter->watchdog_task);
3663}
3664
3665static void igb_watchdog_task(struct work_struct *work)
3666{
3667 struct igb_adapter *adapter = container_of(work,
559e9c49
AD
3668 struct igb_adapter,
3669 watchdog_task);
9d5c8243 3670 struct e1000_hw *hw = &adapter->hw;
9d5c8243 3671 struct net_device *netdev = adapter->netdev;
563988dc 3672 u32 link;
7a6ea550 3673 int i;
9d5c8243 3674
4d6b725e 3675 link = igb_has_link(adapter);
9d5c8243
AK
3676 if (link) {
3677 if (!netif_carrier_ok(netdev)) {
3678 u32 ctrl;
330a6d6a
AD
3679 hw->mac.ops.get_speed_and_duplex(hw,
3680 &adapter->link_speed,
3681 &adapter->link_duplex);
9d5c8243
AK
3682
3683 ctrl = rd32(E1000_CTRL);
527d47c1
AD
3684 /* Link status message must follow this format */
3685 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
9d5c8243 3686 "Flow Control: %s\n",
559e9c49
AD
3687 netdev->name,
3688 adapter->link_speed,
3689 adapter->link_duplex == FULL_DUPLEX ?
9d5c8243 3690 "Full Duplex" : "Half Duplex",
559e9c49
AD
3691 ((ctrl & E1000_CTRL_TFCE) &&
3692 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3693 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3694 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
9d5c8243 3695
563988dc
SA
3696 /* check for thermal sensor event */
3697 if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
3698 printk(KERN_INFO "igb: %s The network adapter "
3699 "link speed was downshifted "
3700 "because it overheated.\n",
3701 netdev->name);
7ef5ed1c 3702 }
563988dc 3703
d07f3e37 3704 /* adjust timeout factor according to speed/duplex */
9d5c8243
AK
3705 adapter->tx_timeout_factor = 1;
3706 switch (adapter->link_speed) {
3707 case SPEED_10:
9d5c8243
AK
3708 adapter->tx_timeout_factor = 14;
3709 break;
3710 case SPEED_100:
9d5c8243
AK
3711 /* maybe add some timeout factor ? */
3712 break;
3713 }
3714
3715 netif_carrier_on(netdev);
9d5c8243 3716
4ae196df 3717 igb_ping_all_vfs(adapter);
17dc566c 3718 igb_check_vf_rate_limit(adapter);
4ae196df 3719
4b1a9877 3720 /* link state has changed, schedule phy info update */
9d5c8243
AK
3721 if (!test_bit(__IGB_DOWN, &adapter->state))
3722 mod_timer(&adapter->phy_info_timer,
3723 round_jiffies(jiffies + 2 * HZ));
3724 }
3725 } else {
3726 if (netif_carrier_ok(netdev)) {
3727 adapter->link_speed = 0;
3728 adapter->link_duplex = 0;
563988dc
SA
3729
3730 /* check for thermal sensor event */
3731 if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
3732 printk(KERN_ERR "igb: %s The network adapter "
3733 "was stopped because it "
3734 "overheated.\n",
7ef5ed1c 3735 netdev->name);
7ef5ed1c 3736 }
563988dc 3737
527d47c1
AD
3738 /* Link status message must follow this format */
3739 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3740 netdev->name);
9d5c8243 3741 netif_carrier_off(netdev);
4b1a9877 3742
4ae196df
AD
3743 igb_ping_all_vfs(adapter);
3744
4b1a9877 3745 /* link state has changed, schedule phy info update */
9d5c8243
AK
3746 if (!test_bit(__IGB_DOWN, &adapter->state))
3747 mod_timer(&adapter->phy_info_timer,
3748 round_jiffies(jiffies + 2 * HZ));
3749 }
3750 }
3751
12dcd86b
ED
3752 spin_lock(&adapter->stats64_lock);
3753 igb_update_stats(adapter, &adapter->stats64);
3754 spin_unlock(&adapter->stats64_lock);
9d5c8243 3755
dbabb065 3756 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 3757 struct igb_ring *tx_ring = adapter->tx_ring[i];
dbabb065 3758 if (!netif_carrier_ok(netdev)) {
9d5c8243
AK
3759 /* We've lost link, so the controller stops DMA,
3760 * but we've got queued Tx work that's never going
3761 * to get done, so reset controller to flush Tx.
3762 * (Do the reset outside of interrupt context). */
dbabb065
AD
3763 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3764 adapter->tx_timeout_count++;
3765 schedule_work(&adapter->reset_task);
3766 /* return immediately since reset is imminent */
3767 return;
3768 }
9d5c8243 3769 }
9d5c8243 3770
dbabb065
AD
3771 /* Force detection of hung controller every watchdog period */
3772 tx_ring->detect_tx_hung = true;
3773 }
f7ba205e 3774
9d5c8243 3775 /* Cause software interrupt to ensure rx ring is cleaned */
7a6ea550 3776 if (adapter->msix_entries) {
047e0030
AD
3777 u32 eics = 0;
3778 for (i = 0; i < adapter->num_q_vectors; i++) {
3779 struct igb_q_vector *q_vector = adapter->q_vector[i];
3780 eics |= q_vector->eims_value;
3781 }
7a6ea550
AD
3782 wr32(E1000_EICS, eics);
3783 } else {
3784 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3785 }
9d5c8243 3786
13800469
GR
3787 igb_spoof_check(adapter);
3788
9d5c8243
AK
3789 /* Reset the timer */
3790 if (!test_bit(__IGB_DOWN, &adapter->state))
3791 mod_timer(&adapter->watchdog_timer,
3792 round_jiffies(jiffies + 2 * HZ));
3793}
3794
3795enum latency_range {
3796 lowest_latency = 0,
3797 low_latency = 1,
3798 bulk_latency = 2,
3799 latency_invalid = 255
3800};
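/* igb_set_itr() below maps these ranges to interrupt rates:
 * lowest_latency -> IGB_70K_ITR (~70,000 ints/s),
 * low_latency    -> IGB_20K_ITR (~20,000 ints/s),
 * bulk_latency   -> IGB_4K_ITR  (~4,000 ints/s). */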
3801
6eb5a7f1
AD
3802/**
3803 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3804 *
3805 * Stores a new ITR value based strictly on packet size. This
3806 * algorithm is less sophisticated than that used in igb_update_itr,
3807 * due to the difficulty of synchronizing statistics across multiple
eef35c2d 3808 * receive rings. The divisors and thresholds used by this function
6eb5a7f1
AD
3809 * were determined based on theoretical maximum wire speed and testing
3810 * data, in order to minimize response time while increasing bulk
3811 * throughput.
3812 * This functionality is controlled by the InterruptThrottleRate module
3813 * parameter (see igb_param.c)
3814 * NOTE: This function is called only when operating in a multiqueue
3815 * receive environment.
047e0030 3816 * @q_vector: pointer to q_vector
6eb5a7f1 3817 **/
047e0030 3818static void igb_update_ring_itr(struct igb_q_vector *q_vector)
9d5c8243 3819{
047e0030 3820 int new_val = q_vector->itr_val;
6eb5a7f1 3821 int avg_wire_size = 0;
047e0030 3822 struct igb_adapter *adapter = q_vector->adapter;
12dcd86b 3823 unsigned int packets;
9d5c8243 3824
6eb5a7f1
AD
3825 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3826 * ints/sec - ITR timer value of 120 ticks.
3827 */
3828 if (adapter->link_speed != SPEED_1000) {
0ba82994 3829 new_val = IGB_4K_ITR;
6eb5a7f1 3830 goto set_itr_val;
9d5c8243 3831 }
047e0030 3832
0ba82994
AD
3833 packets = q_vector->rx.total_packets;
3834 if (packets)
3835 avg_wire_size = q_vector->rx.total_bytes / packets;
047e0030 3836
0ba82994
AD
3837 packets = q_vector->tx.total_packets;
3838 if (packets)
3839 avg_wire_size = max_t(u32, avg_wire_size,
3840 q_vector->tx.total_bytes / packets);
047e0030
AD
3841
3842 /* if avg_wire_size isn't set no work was done */
3843 if (!avg_wire_size)
3844 goto clear_counts;
9d5c8243 3845
6eb5a7f1
AD
3846 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3847 avg_wire_size += 24;
3848
3849 /* Don't starve jumbo frames */
3850 avg_wire_size = min(avg_wire_size, 3000);
9d5c8243 3851
6eb5a7f1
AD
3852 /* Give a little boost to mid-size frames */
3853 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3854 new_val = avg_wire_size / 3;
3855 else
3856 new_val = avg_wire_size / 2;
9d5c8243 3857
0ba82994
AD
3858 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3859 if (new_val < IGB_20K_ITR &&
3860 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3861 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3862 new_val = IGB_20K_ITR;
abe1c363 3863
6eb5a7f1 3864set_itr_val:
047e0030
AD
3865 if (new_val != q_vector->itr_val) {
3866 q_vector->itr_val = new_val;
3867 q_vector->set_itr = 1;
9d5c8243 3868 }
6eb5a7f1 3869clear_counts:
0ba82994
AD
3870 q_vector->rx.total_bytes = 0;
3871 q_vector->rx.total_packets = 0;
3872 q_vector->tx.total_bytes = 0;
3873 q_vector->tx.total_packets = 0;
9d5c8243
AK
3874}
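/* Illustrative example of the heuristic above: 500 received packets
 * totalling 300000 bytes give avg_wire_size = 600; adding 24 for
 * CRC/preamble/IFG yields 624, which falls in the 300..1200 mid-size
 * band, so new_val = 624 / 3 = 208 (still above the IGB_20K_ITR floor
 * applied in conservative mode). */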
3875
3876/**
3877 * igb_update_itr - update the dynamic ITR value based on statistics
3878 * Stores a new ITR value based on packets and byte
3879 * counts during the last interrupt. The advantage of per interrupt
3880 * computation is faster updates and more accurate ITR for the current
3881 * traffic pattern. Constants in this function were computed
3882 * based on theoretical maximum wire speed and thresholds were set based
3883 * on testing data as well as attempting to minimize response time
3884 * while increasing bulk throughput.
3885 * this functionality is controlled by the InterruptThrottleRate module
3886 * parameter (see igb_param.c)
3887 * NOTE: These calculations are only valid when operating in a single-
3888 * queue environment.
0ba82994
AD
3889 * @q_vector: pointer to q_vector
3890 * @ring_container: ring info to update the itr for
9d5c8243 3891 **/
0ba82994
AD
3892static void igb_update_itr(struct igb_q_vector *q_vector,
3893 struct igb_ring_container *ring_container)
9d5c8243 3894{
0ba82994
AD
3895 unsigned int packets = ring_container->total_packets;
3896 unsigned int bytes = ring_container->total_bytes;
3897 u8 itrval = ring_container->itr;
9d5c8243 3898
0ba82994 3899 /* no packets, exit with status unchanged */
9d5c8243 3900 if (packets == 0)
0ba82994 3901 return;
9d5c8243 3902
0ba82994 3903 switch (itrval) {
9d5c8243
AK
3904 case lowest_latency:
3905 /* handle TSO and jumbo frames */
3906 if (bytes/packets > 8000)
0ba82994 3907 itrval = bulk_latency;
9d5c8243 3908 else if ((packets < 5) && (bytes > 512))
0ba82994 3909 itrval = low_latency;
9d5c8243
AK
3910 break;
3911 case low_latency: /* 50 usec aka 20000 ints/s */
3912 if (bytes > 10000) {
3913 /* this if handles the TSO accounting */
3914 if (bytes/packets > 8000) {
0ba82994 3915 itrval = bulk_latency;
9d5c8243 3916 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
0ba82994 3917 itrval = bulk_latency;
9d5c8243 3918 } else if ((packets > 35)) {
0ba82994 3919 itrval = lowest_latency;
9d5c8243
AK
3920 }
3921 } else if (bytes/packets > 2000) {
0ba82994 3922 itrval = bulk_latency;
9d5c8243 3923 } else if (packets <= 2 && bytes < 512) {
0ba82994 3924 itrval = lowest_latency;
9d5c8243
AK
3925 }
3926 break;
3927 case bulk_latency: /* 250 usec aka 4000 ints/s */
3928 if (bytes > 25000) {
3929 if (packets > 35)
0ba82994 3930 itrval = low_latency;
1e5c3d21 3931 } else if (bytes < 1500) {
0ba82994 3932 itrval = low_latency;
9d5c8243
AK
3933 }
3934 break;
3935 }
3936
0ba82994
AD
3937 /* clear work counters since we have the values we need */
3938 ring_container->total_bytes = 0;
3939 ring_container->total_packets = 0;
3940
3941 /* write updated itr to ring container */
3942 ring_container->itr = itrval;
9d5c8243
AK
3943}
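/* Illustrative example: a ring in low_latency that saw 40 packets and
 * 40000 bytes since the last interrupt has bytes > 10000 and an average
 * of 1000 bytes/packet (not TSO-sized), so the packets > 35 test moves
 * it to lowest_latency. */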
3944
0ba82994 3945static void igb_set_itr(struct igb_q_vector *q_vector)
9d5c8243 3946{
0ba82994 3947 struct igb_adapter *adapter = q_vector->adapter;
047e0030 3948 u32 new_itr = q_vector->itr_val;
0ba82994 3949 u8 current_itr = 0;
9d5c8243
AK
3950
3951 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3952 if (adapter->link_speed != SPEED_1000) {
3953 current_itr = 0;
0ba82994 3954 new_itr = IGB_4K_ITR;
9d5c8243
AK
3955 goto set_itr_now;
3956 }
3957
0ba82994
AD
3958 igb_update_itr(q_vector, &q_vector->tx);
3959 igb_update_itr(q_vector, &q_vector->rx);
9d5c8243 3960
0ba82994 3961 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
9d5c8243 3962
6eb5a7f1 3963 /* conservative mode (itr 3) eliminates the lowest_latency setting */
0ba82994
AD
3964 if (current_itr == lowest_latency &&
3965 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3966 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
6eb5a7f1
AD
3967 current_itr = low_latency;
3968
9d5c8243
AK
3969 switch (current_itr) {
3970 /* counts and packets in update_itr are dependent on these numbers */
3971 case lowest_latency:
0ba82994 3972 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
9d5c8243
AK
3973 break;
3974 case low_latency:
0ba82994 3975 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
9d5c8243
AK
3976 break;
3977 case bulk_latency:
0ba82994 3978 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
9d5c8243
AK
3979 break;
3980 default:
3981 break;
3982 }
3983
3984set_itr_now:
047e0030 3985 if (new_itr != q_vector->itr_val) {
9d5c8243
AK
3986 /* this attempts to bias the interrupt rate towards Bulk
3987 * by adding intermediate steps when interrupt rate is
3988 * increasing */
047e0030
AD
3989 new_itr = new_itr > q_vector->itr_val ?
3990 max((new_itr * q_vector->itr_val) /
3991 (new_itr + (q_vector->itr_val >> 2)),
0ba82994 3992 new_itr) :
9d5c8243
AK
3993 new_itr;
3994 /* Don't write the value here; it resets the adapter's
3995 * internal timer, and causes us to delay far longer than
3996 * we should between interrupts. Instead, we write the ITR
3997 * value at the beginning of the next interrupt so the timing
3998 * ends up being correct.
3999 */
047e0030
AD
4000 q_vector->itr_val = new_itr;
4001 q_vector->set_itr = 1;
9d5c8243 4002 }
9d5c8243
AK
4003}
4004
7d13a7d0
AD
4005void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4006 u32 type_tucmd, u32 mss_l4len_idx)
4007{
4008 struct e1000_adv_tx_context_desc *context_desc;
4009 u16 i = tx_ring->next_to_use;
4010
4011 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4012
4013 i++;
4014 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4015
4016 /* set bits to identify this as an advanced context descriptor */
4017 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4018
4019 /* For 82575, context index must be unique per ring. */
866cff06 4020 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
7d13a7d0
AD
4021 mss_l4len_idx |= tx_ring->reg_idx << 4;
4022
4023 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4024 context_desc->seqnum_seed = 0;
4025 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4026 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4027}
4028
7af40ad9
AD
4029static int igb_tso(struct igb_ring *tx_ring,
4030 struct igb_tx_buffer *first,
4031 u8 *hdr_len)
9d5c8243 4032{
7af40ad9 4033 struct sk_buff *skb = first->skb;
7d13a7d0
AD
4034 u32 vlan_macip_lens, type_tucmd;
4035 u32 mss_l4len_idx, l4len;
4036
4037 if (!skb_is_gso(skb))
4038 return 0;
9d5c8243
AK
4039
4040 if (skb_header_cloned(skb)) {
7af40ad9 4041 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
9d5c8243
AK
4042 if (err)
4043 return err;
4044 }
4045
7d13a7d0
AD
4046 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4047 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
9d5c8243 4048
7af40ad9 4049 if (first->protocol == __constant_htons(ETH_P_IP)) {
9d5c8243
AK
4050 struct iphdr *iph = ip_hdr(skb);
4051 iph->tot_len = 0;
4052 iph->check = 0;
4053 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4054 iph->daddr, 0,
4055 IPPROTO_TCP,
4056 0);
7d13a7d0 4057 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
7af40ad9
AD
4058 first->tx_flags |= IGB_TX_FLAGS_TSO |
4059 IGB_TX_FLAGS_CSUM |
4060 IGB_TX_FLAGS_IPV4;
8e1e8a47 4061 } else if (skb_is_gso_v6(skb)) {
9d5c8243
AK
4062 ipv6_hdr(skb)->payload_len = 0;
4063 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4064 &ipv6_hdr(skb)->daddr,
4065 0, IPPROTO_TCP, 0);
7af40ad9
AD
4066 first->tx_flags |= IGB_TX_FLAGS_TSO |
4067 IGB_TX_FLAGS_CSUM;
9d5c8243
AK
4068 }
4069
7af40ad9 4070 /* compute header lengths */
7d13a7d0
AD
4071 l4len = tcp_hdrlen(skb);
4072 *hdr_len = skb_transport_offset(skb) + l4len;
9d5c8243 4073
7af40ad9
AD
4074 /* update gso size and bytecount with header size */
4075 first->gso_segs = skb_shinfo(skb)->gso_segs;
4076 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4077
9d5c8243 4078 /* MSS L4LEN IDX */
7d13a7d0
AD
4079 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4080 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
9d5c8243 4081
7d13a7d0
AD
4082 /* VLAN MACLEN IPLEN */
4083 vlan_macip_lens = skb_network_header_len(skb);
4084 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
7af40ad9 4085 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
9d5c8243 4086
7d13a7d0 4087 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
9d5c8243 4088
7d13a7d0 4089 return 1;
9d5c8243
AK
4090}
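/* Illustrative example: an IPv4 TCP frame with a 14-byte Ethernet
 * header, 20-byte IP header and 20-byte TCP header gives l4len = 20 and
 * *hdr_len = 34 + 20 = 54; for a 10-segment TSO skb, bytecount is
 * increased by (10 - 1) * 54 = 486 bytes of replicated headers. */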
4091
7af40ad9 4092static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
9d5c8243 4093{
7af40ad9 4094 struct sk_buff *skb = first->skb;
7d13a7d0
AD
4095 u32 vlan_macip_lens = 0;
4096 u32 mss_l4len_idx = 0;
4097 u32 type_tucmd = 0;
9d5c8243 4098
7d13a7d0 4099 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7af40ad9
AD
4100 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4101 return;
7d13a7d0
AD
4102 } else {
4103 u8 l4_hdr = 0;
7af40ad9 4104 switch (first->protocol) {
7d13a7d0
AD
4105 case __constant_htons(ETH_P_IP):
4106 vlan_macip_lens |= skb_network_header_len(skb);
4107 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4108 l4_hdr = ip_hdr(skb)->protocol;
4109 break;
4110 case __constant_htons(ETH_P_IPV6):
4111 vlan_macip_lens |= skb_network_header_len(skb);
4112 l4_hdr = ipv6_hdr(skb)->nexthdr;
4113 break;
4114 default:
4115 if (unlikely(net_ratelimit())) {
4116 dev_warn(tx_ring->dev,
4117 "partial checksum but proto=%x!\n",
7af40ad9 4118 first->protocol);
fa4a7ef3 4119 }
7d13a7d0
AD
4120 break;
4121 }
fa4a7ef3 4122
7d13a7d0
AD
4123 switch (l4_hdr) {
4124 case IPPROTO_TCP:
4125 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4126 mss_l4len_idx = tcp_hdrlen(skb) <<
4127 E1000_ADVTXD_L4LEN_SHIFT;
4128 break;
4129 case IPPROTO_SCTP:
4130 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4131 mss_l4len_idx = sizeof(struct sctphdr) <<
4132 E1000_ADVTXD_L4LEN_SHIFT;
4133 break;
4134 case IPPROTO_UDP:
4135 mss_l4len_idx = sizeof(struct udphdr) <<
4136 E1000_ADVTXD_L4LEN_SHIFT;
4137 break;
4138 default:
4139 if (unlikely(net_ratelimit())) {
4140 dev_warn(tx_ring->dev,
4141 "partial checksum but l4 proto=%x!\n",
4142 l4_hdr);
44b0cda3 4143 }
7d13a7d0 4144 break;
9d5c8243 4145 }
7af40ad9
AD
4146
4147 /* update TX checksum flag */
4148 first->tx_flags |= IGB_TX_FLAGS_CSUM;
7d13a7d0 4149 }
9d5c8243 4150
7d13a7d0 4151 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
7af40ad9 4152 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
9d5c8243 4153
7d13a7d0 4154 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
9d5c8243
AK
4155}
4156
e032afc8
AD
4157static __le32 igb_tx_cmd_type(u32 tx_flags)
4158{
4159 /* set type for advanced descriptor with frame checksum insertion */
4160 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4161 E1000_ADVTXD_DCMD_IFCS |
4162 E1000_ADVTXD_DCMD_DEXT);
4163
4164 /* set HW vlan bit if vlan is present */
4165 if (tx_flags & IGB_TX_FLAGS_VLAN)
4166 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4167
4168 /* set timestamp bit if present */
4169 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
4170 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
4171
4172 /* set segmentation bits for TSO */
4173 if (tx_flags & IGB_TX_FLAGS_TSO)
4174 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4175
4176 return cmd_type;
4177}
4178
7af40ad9
AD
4179static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4180 union e1000_adv_tx_desc *tx_desc,
4181 u32 tx_flags, unsigned int paylen)
e032afc8
AD
4182{
4183 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4184
4185 /* 82575 requires a unique index per ring if any offload is enabled */
4186 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
866cff06 4187 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
e032afc8
AD
4188 olinfo_status |= tx_ring->reg_idx << 4;
4189
4190 /* insert L4 checksum */
4191 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4192 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4193
4194 /* insert IPv4 checksum */
4195 if (tx_flags & IGB_TX_FLAGS_IPV4)
4196 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4197 }
4198
7af40ad9 4199 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
e032afc8
AD
4200}
4201
ebe42d16
AD
4202/*
4203 * The largest size we can write to the descriptor is 65535. In order to
4204 * maintain a power of two alignment we have to limit ourselves to 32K.
4205 */
4206#define IGB_MAX_TXD_PWR 15
7af40ad9 4207#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
9d5c8243 4208
7af40ad9
AD
4209static void igb_tx_map(struct igb_ring *tx_ring,
4210 struct igb_tx_buffer *first,
ebe42d16 4211 const u8 hdr_len)
9d5c8243 4212{
7af40ad9 4213 struct sk_buff *skb = first->skb;
ebe42d16
AD
4214 struct igb_tx_buffer *tx_buffer_info;
4215 union e1000_adv_tx_desc *tx_desc;
4216 dma_addr_t dma;
4217 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4218 unsigned int data_len = skb->data_len;
4219 unsigned int size = skb_headlen(skb);
4220 unsigned int paylen = skb->len - hdr_len;
4221 __le32 cmd_type;
7af40ad9 4222 u32 tx_flags = first->tx_flags;
ebe42d16 4223 u16 i = tx_ring->next_to_use;
ebe42d16
AD
4224
4225 tx_desc = IGB_TX_DESC(tx_ring, i);
4226
7af40ad9 4227 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
ebe42d16
AD
4228 cmd_type = igb_tx_cmd_type(tx_flags);
4229
4230 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4231 if (dma_mapping_error(tx_ring->dev, dma))
6366ad33 4232 goto dma_error;
9d5c8243 4233
ebe42d16
AD
4234 /* record length, and DMA address */
4235 first->length = size;
4236 first->dma = dma;
ebe42d16
AD
4237 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4238
4239 for (;;) {
4240 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4241 tx_desc->read.cmd_type_len =
4242 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
4243
4244 i++;
4245 tx_desc++;
4246 if (i == tx_ring->count) {
4247 tx_desc = IGB_TX_DESC(tx_ring, 0);
4248 i = 0;
4249 }
4250
4251 dma += IGB_MAX_DATA_PER_TXD;
4252 size -= IGB_MAX_DATA_PER_TXD;
4253
4254 tx_desc->read.olinfo_status = 0;
4255 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4256 }
4257
4258 if (likely(!data_len))
4259 break;
2bbfebe2 4260
ebe42d16 4261 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
9d5c8243 4262
65689fef 4263 i++;
ebe42d16
AD
4264 tx_desc++;
4265 if (i == tx_ring->count) {
4266 tx_desc = IGB_TX_DESC(tx_ring, 0);
65689fef 4267 i = 0;
ebe42d16 4268 }
65689fef 4269
ebe42d16
AD
4270 size = frag->size;
4271 data_len -= size;
4272
4273 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4274 size, DMA_TO_DEVICE);
4275 if (dma_mapping_error(tx_ring->dev, dma))
6366ad33
AD
4276 goto dma_error;
4277
ebe42d16
AD
4278 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4279 tx_buffer_info->length = size;
4280 tx_buffer_info->dma = dma;
4281
4282 tx_desc->read.olinfo_status = 0;
4283 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4284
4285 frag++;
9d5c8243
AK
4286 }
4287
ebe42d16
AD
4288 /* write last descriptor with RS and EOP bits */
4289 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
4290 tx_desc->read.cmd_type_len = cmd_type;
8542db05
AD
4291
4292 /* set the timestamp */
4293 first->time_stamp = jiffies;
4294
ebe42d16
AD
4295 /*
4296 * Force memory writes to complete before letting h/w know there
4297 * are new descriptors to fetch. (Only applicable for weak-ordered
4298 * memory model archs, such as IA-64).
4299 *
4300 * We also need this memory barrier to make certain all of the
4301 * status bits have been updated before next_to_watch is written.
4302 */
4303 wmb();
4304
8542db05 4305 /* set next_to_watch value indicating a packet is present */
ebe42d16 4306 first->next_to_watch = tx_desc;
9d5c8243 4307
ebe42d16
AD
4308 i++;
4309 if (i == tx_ring->count)
4310 i = 0;
6366ad33 4311
ebe42d16 4312 tx_ring->next_to_use = i;
6366ad33 4313
ebe42d16 4314 writel(i, tx_ring->tail);
6366ad33 4315
ebe42d16
AD
4316 /* we need this if more than one processor can write to our tail
4317 * at a time; it synchronizes IO on IA64/Altix systems */
4318 mmiowb();
4319
4320 return;
4321
4322dma_error:
4323 dev_err(tx_ring->dev, "TX DMA map failed\n");
4324
4325 /* clear dma mappings for failed tx_buffer_info map */
4326 for (;;) {
4327 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4328 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4329 if (tx_buffer_info == first)
4330 break;
a77ff709
NN
4331 if (i == 0)
4332 i = tx_ring->count;
6366ad33 4333 i--;
6366ad33
AD
4334 }
4335
9d5c8243 4336 tx_ring->next_to_use = i;
9d5c8243
AK
4337}
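/* Illustrative example of the IGB_MAX_DATA_PER_TXD split above: a
 * linear skb with a 40000-byte head and no fragments is emitted as one
 * 32768-byte descriptor followed by a 7232-byte descriptor that carries
 * the RS and EOP bits. */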
4338
6ad4edfc 4339static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
9d5c8243 4340{
e694e964
AD
4341 struct net_device *netdev = tx_ring->netdev;
4342
661086df 4343 netif_stop_subqueue(netdev, tx_ring->queue_index);
661086df 4344
9d5c8243
AK
4345 /* Herbert's original patch had:
4346 * smp_mb__after_netif_stop_queue();
4347 * but since that doesn't exist yet, just open code it. */
4348 smp_mb();
4349
4350 /* We need to check again in case another CPU has just
4351 * made room available. */
c493ea45 4352 if (igb_desc_unused(tx_ring) < size)
9d5c8243
AK
4353 return -EBUSY;
4354
4355 /* A reprieve! */
661086df 4356 netif_wake_subqueue(netdev, tx_ring->queue_index);
12dcd86b
ED
4357
4358 u64_stats_update_begin(&tx_ring->tx_syncp2);
4359 tx_ring->tx_stats.restart_queue2++;
4360 u64_stats_update_end(&tx_ring->tx_syncp2);
4361
9d5c8243
AK
4362 return 0;
4363}
4364
6ad4edfc 4365static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
9d5c8243 4366{
c493ea45 4367 if (igb_desc_unused(tx_ring) >= size)
9d5c8243 4368 return 0;
e694e964 4369 return __igb_maybe_stop_tx(tx_ring, size);
9d5c8243
AK
4370}
4371
cd392f5c
AD
4372netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4373 struct igb_ring *tx_ring)
9d5c8243 4374{
8542db05 4375 struct igb_tx_buffer *first;
ebe42d16 4376 int tso;
91d4ee33 4377 u32 tx_flags = 0;
31f6adbb 4378 __be16 protocol = vlan_get_protocol(skb);
91d4ee33 4379 u8 hdr_len = 0;
9d5c8243 4380
9d5c8243
AK
4381 /* need: 1 descriptor per page,
4382 * + 2 desc gap to keep tail from touching head,
4383 * + 1 desc for skb->data,
4384 * + 1 desc for context descriptor,
4385 * otherwise try next time */
e694e964 4386 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
9d5c8243 4387 /* this is a hard error */
9d5c8243
AK
4388 return NETDEV_TX_BUSY;
4389 }
33af6bcc 4390
7af40ad9
AD
4391 /* record the location of the first descriptor for this packet */
4392 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4393 first->skb = skb;
4394 first->bytecount = skb->len;
4395 first->gso_segs = 1;
4396
2244d07b
OH
4397 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
4398 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
33af6bcc 4399 tx_flags |= IGB_TX_FLAGS_TSTAMP;
33af6bcc 4400 }
9d5c8243 4401
eab6d18d 4402 if (vlan_tx_tag_present(skb)) {
9d5c8243
AK
4403 tx_flags |= IGB_TX_FLAGS_VLAN;
4404 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4405 }
4406
7af40ad9
AD
4407 /* record initial flags and protocol */
4408 first->tx_flags = tx_flags;
4409 first->protocol = protocol;
cdfd01fc 4410
7af40ad9
AD
4411 tso = igb_tso(tx_ring, first, &hdr_len);
4412 if (tso < 0)
7d13a7d0 4413 goto out_drop;
7af40ad9
AD
4414 else if (!tso)
4415 igb_tx_csum(tx_ring, first);
9d5c8243 4416
7af40ad9 4417 igb_tx_map(tx_ring, first, hdr_len);
85ad76b2
AD
4418
4419 /* Make sure there is space in the ring for the next send. */
e694e964 4420 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
85ad76b2 4421
9d5c8243 4422 return NETDEV_TX_OK;
7d13a7d0
AD
4423
4424out_drop:
7af40ad9
AD
4425 igb_unmap_and_free_tx_resource(tx_ring, first);
4426
7d13a7d0 4427 return NETDEV_TX_OK;
9d5c8243
AK
4428}
4429
1cc3bd87
AD
4430static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4431 struct sk_buff *skb)
4432{
4433 unsigned int r_idx = skb->queue_mapping;
4434
4435 if (r_idx >= adapter->num_tx_queues)
4436 r_idx = r_idx % adapter->num_tx_queues;
4437
4438 return adapter->tx_ring[r_idx];
4439}
4440
cd392f5c
AD
4441static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4442 struct net_device *netdev)
9d5c8243
AK
4443{
4444 struct igb_adapter *adapter = netdev_priv(netdev);
b1a436c3
AD
4445
4446 if (test_bit(__IGB_DOWN, &adapter->state)) {
4447 dev_kfree_skb_any(skb);
4448 return NETDEV_TX_OK;
4449 }
4450
4451 if (skb->len <= 0) {
4452 dev_kfree_skb_any(skb);
4453 return NETDEV_TX_OK;
4454 }
4455
1cc3bd87
AD
4456 /*
4457 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4458 * in order to meet this minimum size requirement.
4459 */
4460 if (skb->len < 17) {
4461 if (skb_padto(skb, 17))
4462 return NETDEV_TX_OK;
4463 skb->len = 17;
4464 }
9d5c8243 4465
1cc3bd87 4466 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
9d5c8243
AK
4467}
4468
4469/**
4470 * igb_tx_timeout - Respond to a Tx Hang
4471 * @netdev: network interface device structure
4472 **/
4473static void igb_tx_timeout(struct net_device *netdev)
4474{
4475 struct igb_adapter *adapter = netdev_priv(netdev);
4476 struct e1000_hw *hw = &adapter->hw;
4477
4478 /* Do the reset outside of interrupt context */
4479 adapter->tx_timeout_count++;
f7ba205e 4480
55cac248
AD
4481 if (hw->mac.type == e1000_82580)
4482 hw->dev_spec._82575.global_device_reset = true;
4483
9d5c8243 4484 schedule_work(&adapter->reset_task);
265de409
AD
4485 wr32(E1000_EICS,
4486 (adapter->eims_enable_mask & ~adapter->eims_other));
9d5c8243
AK
4487}
4488
4489static void igb_reset_task(struct work_struct *work)
4490{
4491 struct igb_adapter *adapter;
4492 adapter = container_of(work, struct igb_adapter, reset_task);
4493
c97ec42a
TI
4494 igb_dump(adapter);
4495 netdev_err(adapter->netdev, "Reset adapter\n");
9d5c8243
AK
4496 igb_reinit_locked(adapter);
4497}
4498
4499/**
12dcd86b 4500 * igb_get_stats64 - Get System Network Statistics
9d5c8243 4501 * @netdev: network interface device structure
12dcd86b 4502 * @stats: rtnl_link_stats64 pointer
9d5c8243 4503 *
9d5c8243 4504 **/
12dcd86b
ED
4505static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4506 struct rtnl_link_stats64 *stats)
9d5c8243 4507{
12dcd86b
ED
4508 struct igb_adapter *adapter = netdev_priv(netdev);
4509
4510 spin_lock(&adapter->stats64_lock);
4511 igb_update_stats(adapter, &adapter->stats64);
4512 memcpy(stats, &adapter->stats64, sizeof(*stats));
4513 spin_unlock(&adapter->stats64_lock);
4514
4515 return stats;
9d5c8243
AK
4516}
4517
4518/**
4519 * igb_change_mtu - Change the Maximum Transfer Unit
4520 * @netdev: network interface device structure
4521 * @new_mtu: new value for maximum frame size
4522 *
4523 * Returns 0 on success, negative on failure
4524 **/
4525static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4526{
4527 struct igb_adapter *adapter = netdev_priv(netdev);
090b1795 4528 struct pci_dev *pdev = adapter->pdev;
153285f9 4529 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
9d5c8243 4530
c809d227 4531 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
090b1795 4532 dev_err(&pdev->dev, "Invalid MTU setting\n");
9d5c8243
AK
4533 return -EINVAL;
4534 }
4535
153285f9 4536#define MAX_STD_JUMBO_FRAME_SIZE 9238
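/* max_frame adds ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 22
 * bytes to the MTU, so MAX_STD_JUMBO_FRAME_SIZE (9238) corresponds to
 * the 9216-byte MTU limit quoted in the message below. */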
9d5c8243 4537 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
090b1795 4538 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
9d5c8243
AK
4539 return -EINVAL;
4540 }
4541
4542 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4543 msleep(1);
73cd78f1 4544
9d5c8243
AK
4545 /* igb_down has a dependency on max_frame_size */
4546 adapter->max_frame_size = max_frame;
559e9c49 4547
4c844851
AD
4548 if (netif_running(netdev))
4549 igb_down(adapter);
9d5c8243 4550
090b1795 4551 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
9d5c8243
AK
4552 netdev->mtu, new_mtu);
4553 netdev->mtu = new_mtu;
4554
4555 if (netif_running(netdev))
4556 igb_up(adapter);
4557 else
4558 igb_reset(adapter);
4559
4560 clear_bit(__IGB_RESETTING, &adapter->state);
4561
4562 return 0;
4563}
4564
4565/**
4566 * igb_update_stats - Update the board statistics counters
4567 * @adapter: board private structure
4568 **/
4569
12dcd86b
ED
4570void igb_update_stats(struct igb_adapter *adapter,
4571 struct rtnl_link_stats64 *net_stats)
9d5c8243
AK
4572{
4573 struct e1000_hw *hw = &adapter->hw;
4574 struct pci_dev *pdev = adapter->pdev;
fa3d9a6d 4575 u32 reg, mpc;
9d5c8243 4576 u16 phy_tmp;
3f9c0164
AD
4577 int i;
4578 u64 bytes, packets;
12dcd86b
ED
4579 unsigned int start;
4580 u64 _bytes, _packets;
9d5c8243
AK
4581
4582#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4583
4584 /*
4585 * Prevent stats update while adapter is being reset, or if the pci
4586 * connection is down.
4587 */
4588 if (adapter->link_speed == 0)
4589 return;
4590 if (pci_channel_offline(pdev))
4591 return;
4592
3f9c0164
AD
4593 bytes = 0;
4594 packets = 0;
4595 for (i = 0; i < adapter->num_rx_queues; i++) {
4596 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3025a446 4597 struct igb_ring *ring = adapter->rx_ring[i];
12dcd86b 4598
3025a446 4599 ring->rx_stats.drops += rqdpc_tmp;
128e45eb 4600 net_stats->rx_fifo_errors += rqdpc_tmp;
12dcd86b
ED
4601
4602 do {
4603 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4604 _bytes = ring->rx_stats.bytes;
4605 _packets = ring->rx_stats.packets;
4606 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4607 bytes += _bytes;
4608 packets += _packets;
3f9c0164
AD
4609 }
4610
128e45eb
AD
4611 net_stats->rx_bytes = bytes;
4612 net_stats->rx_packets = packets;
3f9c0164
AD
4613
4614 bytes = 0;
4615 packets = 0;
4616 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 4617 struct igb_ring *ring = adapter->tx_ring[i];
12dcd86b
ED
4618 do {
4619 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4620 _bytes = ring->tx_stats.bytes;
4621 _packets = ring->tx_stats.packets;
4622 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4623 bytes += _bytes;
4624 packets += _packets;
3f9c0164 4625 }
128e45eb
AD
4626 net_stats->tx_bytes = bytes;
4627 net_stats->tx_packets = packets;
3f9c0164
AD
4628
4629 /* read stats registers */
9d5c8243
AK
4630 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4631 adapter->stats.gprc += rd32(E1000_GPRC);
4632 adapter->stats.gorc += rd32(E1000_GORCL);
4633 rd32(E1000_GORCH); /* clear GORCL */
4634 adapter->stats.bprc += rd32(E1000_BPRC);
4635 adapter->stats.mprc += rd32(E1000_MPRC);
4636 adapter->stats.roc += rd32(E1000_ROC);
4637
4638 adapter->stats.prc64 += rd32(E1000_PRC64);
4639 adapter->stats.prc127 += rd32(E1000_PRC127);
4640 adapter->stats.prc255 += rd32(E1000_PRC255);
4641 adapter->stats.prc511 += rd32(E1000_PRC511);
4642 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4643 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4644 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4645 adapter->stats.sec += rd32(E1000_SEC);
4646
fa3d9a6d
MW
4647 mpc = rd32(E1000_MPC);
4648 adapter->stats.mpc += mpc;
4649 net_stats->rx_fifo_errors += mpc;
9d5c8243
AK
4650 adapter->stats.scc += rd32(E1000_SCC);
4651 adapter->stats.ecol += rd32(E1000_ECOL);
4652 adapter->stats.mcc += rd32(E1000_MCC);
4653 adapter->stats.latecol += rd32(E1000_LATECOL);
4654 adapter->stats.dc += rd32(E1000_DC);
4655 adapter->stats.rlec += rd32(E1000_RLEC);
4656 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4657 adapter->stats.xontxc += rd32(E1000_XONTXC);
4658 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4659 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4660 adapter->stats.fcruc += rd32(E1000_FCRUC);
4661 adapter->stats.gptc += rd32(E1000_GPTC);
4662 adapter->stats.gotc += rd32(E1000_GOTCL);
4663 rd32(E1000_GOTCH); /* clear GOTCL */
fa3d9a6d 4664 adapter->stats.rnbc += rd32(E1000_RNBC);
9d5c8243
AK
4665 adapter->stats.ruc += rd32(E1000_RUC);
4666 adapter->stats.rfc += rd32(E1000_RFC);
4667 adapter->stats.rjc += rd32(E1000_RJC);
4668 adapter->stats.tor += rd32(E1000_TORH);
4669 adapter->stats.tot += rd32(E1000_TOTH);
4670 adapter->stats.tpr += rd32(E1000_TPR);
4671
4672 adapter->stats.ptc64 += rd32(E1000_PTC64);
4673 adapter->stats.ptc127 += rd32(E1000_PTC127);
4674 adapter->stats.ptc255 += rd32(E1000_PTC255);
4675 adapter->stats.ptc511 += rd32(E1000_PTC511);
4676 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4677 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4678
4679 adapter->stats.mptc += rd32(E1000_MPTC);
4680 adapter->stats.bptc += rd32(E1000_BPTC);
4681
2d0b0f69
NN
4682 adapter->stats.tpt += rd32(E1000_TPT);
4683 adapter->stats.colc += rd32(E1000_COLC);
9d5c8243
AK
4684
4685 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
43915c7c
NN
4686 /* read internal phy specific stats */
4687 reg = rd32(E1000_CTRL_EXT);
4688 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4689 adapter->stats.rxerrc += rd32(E1000_RXERRC);
4690 adapter->stats.tncrs += rd32(E1000_TNCRS);
4691 }
4692
9d5c8243
AK
4693 adapter->stats.tsctc += rd32(E1000_TSCTC);
4694 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4695
4696 adapter->stats.iac += rd32(E1000_IAC);
4697 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4698 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4699 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4700 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4701 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4702 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4703 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4704 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4705
4706 /* Fill out the OS statistics structure */
128e45eb
AD
4707 net_stats->multicast = adapter->stats.mprc;
4708 net_stats->collisions = adapter->stats.colc;
9d5c8243
AK
4709
4710 /* Rx Errors */
4711
4712 /* RLEC on some newer hardware can be incorrect so build
8c0ab70a 4713 * our own version based on RUC and ROC */
128e45eb 4714 net_stats->rx_errors = adapter->stats.rxerrc +
9d5c8243
AK
4715 adapter->stats.crcerrs + adapter->stats.algnerrc +
4716 adapter->stats.ruc + adapter->stats.roc +
4717 adapter->stats.cexterr;
128e45eb
AD
4718 net_stats->rx_length_errors = adapter->stats.ruc +
4719 adapter->stats.roc;
4720 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4721 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4722 net_stats->rx_missed_errors = adapter->stats.mpc;
9d5c8243
AK
4723
4724 /* Tx Errors */
128e45eb
AD
4725 net_stats->tx_errors = adapter->stats.ecol +
4726 adapter->stats.latecol;
4727 net_stats->tx_aborted_errors = adapter->stats.ecol;
4728 net_stats->tx_window_errors = adapter->stats.latecol;
4729 net_stats->tx_carrier_errors = adapter->stats.tncrs;
9d5c8243
AK
4730
4731 /* Tx Dropped needs to be maintained elsewhere */
4732
4733 /* Phy Stats */
4734 if (hw->phy.media_type == e1000_media_type_copper) {
4735 if ((adapter->link_speed == SPEED_1000) &&
73cd78f1 4736 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
9d5c8243
AK
4737 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4738 adapter->phy_stats.idle_errors += phy_tmp;
4739 }
4740 }
4741
4742 /* Management Stats */
4743 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4744 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4745 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
0a915b95
CW
4746
4747 /* OS2BMC Stats */
4748 reg = rd32(E1000_MANC);
4749 if (reg & E1000_MANC_EN_BMC2OS) {
4750 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4751 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4752 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4753 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4754 }
9d5c8243
AK
4755}
4756
9d5c8243
AK
4757static irqreturn_t igb_msix_other(int irq, void *data)
4758{
047e0030 4759 struct igb_adapter *adapter = data;
9d5c8243 4760 struct e1000_hw *hw = &adapter->hw;
844290e5 4761 u32 icr = rd32(E1000_ICR);
844290e5 4762 /* reading ICR causes bit 31 of EICR to be cleared */
dda0e083 4763
7f081d40
AD
4764 if (icr & E1000_ICR_DRSTA)
4765 schedule_work(&adapter->reset_task);
4766
047e0030 4767 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
4768 /* HW is reporting DMA is out of sync */
4769 adapter->stats.doosync++;
13800469
GR
4770 /* The DMA Out of Sync is also an indication of a spoof event
4771 * in IOV mode. Check the Wrong VM Behavior register to
4772 * see if it is really a spoof event. */
4773 igb_check_wvbr(adapter);
dda0e083 4774 }
eebbbdba 4775
4ae196df
AD
4776 /* Check for a mailbox event */
4777 if (icr & E1000_ICR_VMMB)
4778 igb_msg_task(adapter);
4779
4780 if (icr & E1000_ICR_LSC) {
4781 hw->mac.get_link_status = 1;
4782 /* guard against interrupt when we're going down */
4783 if (!test_bit(__IGB_DOWN, &adapter->state))
4784 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4785 }
4786
25568a53
AD
4787 if (adapter->vfs_allocated_count)
4788 wr32(E1000_IMS, E1000_IMS_LSC |
4789 E1000_IMS_VMMB |
4790 E1000_IMS_DOUTSYNC);
4791 else
4792 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
844290e5 4793 wr32(E1000_EIMS, adapter->eims_other);
9d5c8243
AK
4794
4795 return IRQ_HANDLED;
4796}
4797
047e0030 4798static void igb_write_itr(struct igb_q_vector *q_vector)
9d5c8243 4799{
26b39276 4800 struct igb_adapter *adapter = q_vector->adapter;
047e0030 4801 u32 itr_val = q_vector->itr_val & 0x7FFC;
9d5c8243 4802
047e0030
AD
4803 if (!q_vector->set_itr)
4804 return;
73cd78f1 4805
047e0030
AD
4806 if (!itr_val)
4807 itr_val = 0x4;
661086df 4808
26b39276
AD
4809 if (adapter->hw.mac.type == e1000_82575)
4810 itr_val |= itr_val << 16;
661086df 4811 else
0ba82994 4812 itr_val |= E1000_EITR_CNT_IGNR;
661086df 4813
047e0030
AD
4814 writel(itr_val, q_vector->itr_register);
4815 q_vector->set_itr = 0;
6eb5a7f1
AD
4816}
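/* The & 0x7FFC above keeps itr_val within the EITR interval field and
 * aligned to a multiple of 4; the 0x4 floor presumably avoids writing
 * 0, which would leave the vector unthrottled. */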
4817
047e0030 4818static irqreturn_t igb_msix_ring(int irq, void *data)
9d5c8243 4819{
047e0030 4820 struct igb_q_vector *q_vector = data;
9d5c8243 4821
047e0030
AD
4822 /* Write the ITR value calculated from the previous interrupt. */
4823 igb_write_itr(q_vector);
9d5c8243 4824
047e0030 4825 napi_schedule(&q_vector->napi);
844290e5 4826
047e0030 4827 return IRQ_HANDLED;
fe4506b6
JC
4828}
4829
421e02f0 4830#ifdef CONFIG_IGB_DCA
047e0030 4831static void igb_update_dca(struct igb_q_vector *q_vector)
fe4506b6 4832{
047e0030 4833 struct igb_adapter *adapter = q_vector->adapter;
fe4506b6
JC
4834 struct e1000_hw *hw = &adapter->hw;
4835 int cpu = get_cpu();
fe4506b6 4836
047e0030
AD
4837 if (q_vector->cpu == cpu)
4838 goto out_no_update;
4839
0ba82994
AD
4840 if (q_vector->tx.ring) {
4841 int q = q_vector->tx.ring->reg_idx;
047e0030
AD
4842 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4843 if (hw->mac.type == e1000_82575) {
4844 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4845 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
2d064c06 4846 } else {
047e0030
AD
4847 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4848 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4849 E1000_DCA_TXCTRL_CPUID_SHIFT;
4850 }
4851 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4852 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4853 }
0ba82994
AD
4854 if (q_vector->rx.ring) {
4855 int q = q_vector->rx.ring->reg_idx;
047e0030
AD
4856 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4857 if (hw->mac.type == e1000_82575) {
2d064c06 4858 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
92be7917 4859 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
047e0030
AD
4860 } else {
4861 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4862 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4863 E1000_DCA_RXCTRL_CPUID_SHIFT;
2d064c06 4864 }
fe4506b6
JC
4865 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4866 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4867 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4868 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
fe4506b6 4869 }
047e0030
AD
4870 q_vector->cpu = cpu;
4871out_no_update:
fe4506b6
JC
4872 put_cpu();
4873}
4874
4875static void igb_setup_dca(struct igb_adapter *adapter)
4876{
7e0e99ef 4877 struct e1000_hw *hw = &adapter->hw;
fe4506b6
JC
4878 int i;
4879
7dfc16fa 4880 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
fe4506b6
JC
4881 return;
4882
7e0e99ef
AD
4883 /* Always use CB2 mode, difference is masked in the CB driver. */
4884 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
4885
047e0030 4886 for (i = 0; i < adapter->num_q_vectors; i++) {
26b39276
AD
4887 adapter->q_vector[i]->cpu = -1;
4888 igb_update_dca(adapter->q_vector[i]);
fe4506b6
JC
4889 }
4890}
4891
4892static int __igb_notify_dca(struct device *dev, void *data)
4893{
4894 struct net_device *netdev = dev_get_drvdata(dev);
4895 struct igb_adapter *adapter = netdev_priv(netdev);
090b1795 4896 struct pci_dev *pdev = adapter->pdev;
fe4506b6
JC
4897 struct e1000_hw *hw = &adapter->hw;
4898 unsigned long event = *(unsigned long *)data;
4899
4900 switch (event) {
4901 case DCA_PROVIDER_ADD:
4902 /* if already enabled, don't do it again */
7dfc16fa 4903 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
fe4506b6 4904 break;
fe4506b6 4905 if (dca_add_requester(dev) == 0) {
bbd98fe4 4906 adapter->flags |= IGB_FLAG_DCA_ENABLED;
090b1795 4907 dev_info(&pdev->dev, "DCA enabled\n");
fe4506b6
JC
4908 igb_setup_dca(adapter);
4909 break;
4910 }
4911 /* Fall Through since DCA is disabled. */
4912 case DCA_PROVIDER_REMOVE:
7dfc16fa 4913 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
fe4506b6 4914 /* without this a class_device is left
047e0030 4915 * hanging around in the sysfs model */
fe4506b6 4916 dca_remove_requester(dev);
090b1795 4917 dev_info(&pdev->dev, "DCA disabled\n");
7dfc16fa 4918 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
cbd347ad 4919 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
fe4506b6
JC
4920 }
4921 break;
4922 }
bbd98fe4 4923
fe4506b6 4924 return 0;
9d5c8243
AK
4925}
4926
fe4506b6
JC
4927static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
4928 void *p)
4929{
4930 int ret_val;
4931
4932 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
4933 __igb_notify_dca);
4934
4935 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4936}
421e02f0 4937#endif /* CONFIG_IGB_DCA */
9d5c8243 4938
4ae196df
AD
4939static void igb_ping_all_vfs(struct igb_adapter *adapter)
4940{
4941 struct e1000_hw *hw = &adapter->hw;
4942 u32 ping;
4943 int i;
4944
4945 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
4946 ping = E1000_PF_CONTROL_MSG;
f2ca0dbe 4947 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
4ae196df
AD
4948 ping |= E1000_VT_MSGTYPE_CTS;
4949 igb_write_mbx(hw, &ping, 1, i);
4950 }
4951}
4952
7d5753f0
AD
4953static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4954{
4955 struct e1000_hw *hw = &adapter->hw;
4956 u32 vmolr = rd32(E1000_VMOLR(vf));
4957 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4958
d85b9004 4959 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
7d5753f0
AD
4960 IGB_VF_FLAG_MULTI_PROMISC);
4961 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4962
4963 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4964 vmolr |= E1000_VMOLR_MPME;
d85b9004 4965 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
7d5753f0
AD
4966 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4967 } else {
4968 /*
4969 * if we have hashes and we are clearing a multicast promisc
4970 * flag we need to write the hashes to the MTA as this step
4971 * was previously skipped
4972 */
4973 if (vf_data->num_vf_mc_hashes > 30) {
4974 vmolr |= E1000_VMOLR_MPME;
4975 } else if (vf_data->num_vf_mc_hashes) {
4976 int j;
4977 vmolr |= E1000_VMOLR_ROMPE;
4978 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4979 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4980 }
4981 }
4982
4983 wr32(E1000_VMOLR(vf), vmolr);
4984
4985 /* there are flags left unprocessed, likely not supported */
4986 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4987 return -EINVAL;
4988
4989 return 0;
4990
4991}
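Both igb_set_vf_promisc() and igb_restore_vf_multicasts() apply the same rule when a VF is not requesting multicast promiscuous mode: more than 30 stored hashes forces E1000_VMOLR_MPME, a non-empty list of up to 30 hashes sets E1000_VMOLR_ROMPE and replays the hashes into the MTA, and an empty list sets neither. A stand-alone sketch of that decision, with placeholder bit values rather than the real register definitions:

#include <stdint.h>
#include <stdio.h>

#define VMOLR_ROMPE 0x02000000u	/* placeholder: accept packets that hit the MTA */
#define VMOLR_MPME  0x10000000u	/* placeholder: multicast promiscuous enable */
#define MAX_VF_MC_HASHES 30

static uint32_t vf_multicast_vmolr_bits(unsigned int num_hashes)
{
	if (num_hashes > MAX_VF_MC_HASHES)
		return VMOLR_MPME;	/* too many entries: fall back to promiscuous */
	if (num_hashes)
		return VMOLR_ROMPE;	/* filter through the MTA hash table */
	return 0;			/* no multicast addresses at all */
}

int main(void)
{
	printf("%08x %08x %08x\n",
	       vf_multicast_vmolr_bits(0),
	       vf_multicast_vmolr_bits(5),
	       vf_multicast_vmolr_bits(40));
	return 0;
}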
4992
4ae196df
AD
4993static int igb_set_vf_multicasts(struct igb_adapter *adapter,
4994 u32 *msgbuf, u32 vf)
4995{
4996 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
4997 u16 *hash_list = (u16 *)&msgbuf[1];
4998 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4999 int i;
5000
7d5753f0 5001 /* salt away the number of multicast addresses assigned
4ae196df
AD
5002 * to this VF for later use to restore when the PF multicast
5003 * list changes
5004 */
5005 vf_data->num_vf_mc_hashes = n;
5006
7d5753f0
AD
5007 /* only up to 30 hash values supported */
5008 if (n > 30)
5009 n = 30;
5010
5011 /* store the hashes for later use */
4ae196df 5012 for (i = 0; i < n; i++)
a419aef8 5013 vf_data->vf_mc_hashes[i] = hash_list[i];
4ae196df
AD
5014
5015 /* Flush and reset the mta with the new values */
ff41f8dc 5016 igb_set_rx_mode(adapter->netdev);
4ae196df
AD
5017
5018 return 0;
5019}
5020
5021static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5022{
5023 struct e1000_hw *hw = &adapter->hw;
5024 struct vf_data_storage *vf_data;
5025 int i, j;
5026
5027 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7d5753f0
AD
5028 u32 vmolr = rd32(E1000_VMOLR(i));
5029 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5030
4ae196df 5031 vf_data = &adapter->vf_data[i];
7d5753f0
AD
5032
5033 if ((vf_data->num_vf_mc_hashes > 30) ||
5034 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5035 vmolr |= E1000_VMOLR_MPME;
5036 } else if (vf_data->num_vf_mc_hashes) {
5037 vmolr |= E1000_VMOLR_ROMPE;
5038 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5039 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5040 }
5041 wr32(E1000_VMOLR(i), vmolr);
4ae196df
AD
5042 }
5043}
5044
5045static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
5046{
5047 struct e1000_hw *hw = &adapter->hw;
5048 u32 pool_mask, reg, vid;
5049 int i;
5050
5051 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5052
5053 /* Find the vlan filter for this id */
5054 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5055 reg = rd32(E1000_VLVF(i));
5056
5057 /* remove the vf from the pool */
5058 reg &= ~pool_mask;
5059
5060 /* if pool is empty then remove entry from vfta */
5061 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
5062 (reg & E1000_VLVF_VLANID_ENABLE)) {
5063 vid = reg & E1000_VLVF_VLANID_MASK;
5064 reg = 0;
5065 igb_vfta_set(hw, vid, false);
5066 }
5067
5068 wr32(E1000_VLVF(i), reg);
5069 }
ae641bdc
AD
5070
5071 adapter->vf_data[vf].vlans_enabled = 0;
4ae196df
AD
5072}
5073
5074static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5075{
5076 struct e1000_hw *hw = &adapter->hw;
5077 u32 reg, i;
5078
51466239
AD
5079 /* The vlvf table only exists on 82576 hardware and newer */
5080 if (hw->mac.type < e1000_82576)
5081 return -1;
5082
5083 /* we only need to do this if VMDq is enabled */
4ae196df
AD
5084 if (!adapter->vfs_allocated_count)
5085 return -1;
5086
5087 /* Find the vlan filter for this id */
5088 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5089 reg = rd32(E1000_VLVF(i));
5090 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5091 vid == (reg & E1000_VLVF_VLANID_MASK))
5092 break;
5093 }
5094
5095 if (add) {
5096 if (i == E1000_VLVF_ARRAY_SIZE) {
5097 /* Did not find a matching VLAN ID entry that was
5098 * enabled. Search for a free filter entry, i.e.
5099 * one without the enable bit set
5100 */
5101 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5102 reg = rd32(E1000_VLVF(i));
5103 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5104 break;
5105 }
5106 }
5107 if (i < E1000_VLVF_ARRAY_SIZE) {
5108 /* Found an enabled/available entry */
5109 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5110
5111 /* if !enabled we need to set this up in vfta */
5112 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
51466239
AD
5113 /* add VID to filter table */
5114 igb_vfta_set(hw, vid, true);
4ae196df
AD
5115 reg |= E1000_VLVF_VLANID_ENABLE;
5116 }
cad6d05f
AD
5117 reg &= ~E1000_VLVF_VLANID_MASK;
5118 reg |= vid;
4ae196df 5119 wr32(E1000_VLVF(i), reg);
ae641bdc
AD
5120
5121 /* do not modify RLPML for PF devices */
5122 if (vf >= adapter->vfs_allocated_count)
5123 return 0;
5124
5125 if (!adapter->vf_data[vf].vlans_enabled) {
5126 u32 size;
5127 reg = rd32(E1000_VMOLR(vf));
5128 size = reg & E1000_VMOLR_RLPML_MASK;
5129 size += 4;
5130 reg &= ~E1000_VMOLR_RLPML_MASK;
5131 reg |= size;
5132 wr32(E1000_VMOLR(vf), reg);
5133 }
ae641bdc 5134
51466239 5135 adapter->vf_data[vf].vlans_enabled++;
4ae196df
AD
5136 return 0;
5137 }
5138 } else {
5139 if (i < E1000_VLVF_ARRAY_SIZE) {
5140 /* remove vf from the pool */
5141 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5142 /* if pool is empty then remove entry from vfta */
5143 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5144 reg = 0;
5145 igb_vfta_set(hw, vid, false);
5146 }
5147 wr32(E1000_VLVF(i), reg);
ae641bdc
AD
5148
5149 /* do not modify RLPML for PF devices */
5150 if (vf >= adapter->vfs_allocated_count)
5151 return 0;
5152
5153 adapter->vf_data[vf].vlans_enabled--;
5154 if (!adapter->vf_data[vf].vlans_enabled) {
5155 u32 size;
5156 reg = rd32(E1000_VMOLR(vf));
5157 size = reg & E1000_VMOLR_RLPML_MASK;
5158 size -= 4;
5159 reg &= ~E1000_VMOLR_RLPML_MASK;
5160 reg |= size;
5161 wr32(E1000_VMOLR(vf), reg);
5162 }
4ae196df
AD
5163 }
5164 }
8151d294
WM
5165 return 0;
5166}
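Two details in igb_vlvf_set() are easy to miss: each VF owns one bit in the VLVF pool-select field (bit E1000_VLVF_POOLSEL_SHIFT + vf), and the VF's maximum receive packet length (the RLPML field in VMOLR) grows by 4 bytes when its first VLAN is added, to make room for the 802.1Q tag, and shrinks by 4 when its last VLAN is removed. A small sketch of both calculations; the shift and mask values here are assumptions, not the hardware header definitions:

#include <stdint.h>
#include <stdio.h>

#define VLVF_POOLSEL_SHIFT 12		/* assumed position of the pool-select field */
#define VMOLR_RLPML_MASK   0x00003FFFu	/* assumed width of the RLPML field */

static uint32_t vf_pool_bit(unsigned int vf)
{
	return 1u << (VLVF_POOLSEL_SHIFT + vf);
}

/* grow or shrink the per-VF max packet length by one VLAN tag (4 bytes) */
static uint32_t adjust_rlpml(uint32_t vmolr, int add_vlan)
{
	uint32_t size = vmolr & VMOLR_RLPML_MASK;

	size += add_vlan ? 4 : -4;
	return (vmolr & ~VMOLR_RLPML_MASK) | (size & VMOLR_RLPML_MASK);
}

int main(void)
{
	printf("VF2 pool bit: %08x\n", vf_pool_bit(2));
	printf("RLPML 1522 -> %u\n", adjust_rlpml(1522, 1) & VMOLR_RLPML_MASK);
	return 0;
}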
5167
5168static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5169{
5170 struct e1000_hw *hw = &adapter->hw;
5171
5172 if (vid)
5173 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5174 else
5175 wr32(E1000_VMVIR(vf), 0);
5176}
5177
5178static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5179 int vf, u16 vlan, u8 qos)
5180{
5181 int err = 0;
5182 struct igb_adapter *adapter = netdev_priv(netdev);
5183
5184 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5185 return -EINVAL;
5186 if (vlan || qos) {
5187 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5188 if (err)
5189 goto out;
5190 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5191 igb_set_vmolr(adapter, vf, !vlan);
5192 adapter->vf_data[vf].pf_vlan = vlan;
5193 adapter->vf_data[vf].pf_qos = qos;
5194 dev_info(&adapter->pdev->dev,
5195 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5196 if (test_bit(__IGB_DOWN, &adapter->state)) {
5197 dev_warn(&adapter->pdev->dev,
5198 "The VF VLAN has been set,"
5199 " but the PF device is not up.\n");
5200 dev_warn(&adapter->pdev->dev,
5201 "Bring the PF device up before"
5202 " attempting to use the VF device.\n");
5203 }
5204 } else {
5205 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5206 false, vf);
5207 igb_set_vmvir(adapter, vlan, vf);
5208 igb_set_vmolr(adapter, vf, true);
5209 adapter->vf_data[vf].pf_vlan = 0;
5210 adapter->vf_data[vf].pf_qos = 0;
5211 }
5212out:
5213 return err;
4ae196df
AD
5214}
5215
5216static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5217{
5218 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5219 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5220
5221 return igb_vlvf_set(adapter, vid, add, vf);
5222}
5223
f2ca0dbe 5224static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4ae196df 5225{
8fa7e0f7
GR
5226 /* clear flags - except flag that indicates PF has set the MAC */
5227 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
f2ca0dbe 5228 adapter->vf_data[vf].last_nack = jiffies;
4ae196df
AD
5229
5230 /* reset offloads to defaults */
8151d294 5231 igb_set_vmolr(adapter, vf, true);
4ae196df
AD
5232
5233 /* reset vlans for device */
5234 igb_clear_vf_vfta(adapter, vf);
8151d294
WM
5235 if (adapter->vf_data[vf].pf_vlan)
5236 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5237 adapter->vf_data[vf].pf_vlan,
5238 adapter->vf_data[vf].pf_qos);
5239 else
5240 igb_clear_vf_vfta(adapter, vf);
4ae196df
AD
5241
5242 /* reset multicast table array for vf */
5243 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5244
5245 /* Flush and reset the mta with the new values */
ff41f8dc 5246 igb_set_rx_mode(adapter->netdev);
4ae196df
AD
5247}
5248
f2ca0dbe
AD
5249static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5250{
5251 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5252
5253 /* generate a new mac address as we were hotplug removed/added */
8151d294
WM
5254 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
5255 random_ether_addr(vf_mac);
f2ca0dbe
AD
5256
5257 /* process remaining reset events */
5258 igb_vf_reset(adapter, vf);
5259}
5260
5261static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4ae196df
AD
5262{
5263 struct e1000_hw *hw = &adapter->hw;
5264 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
ff41f8dc 5265 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
4ae196df
AD
5266 u32 reg, msgbuf[3];
5267 u8 *addr = (u8 *)(&msgbuf[1]);
5268
5269 /* process all the same items cleared in a function level reset */
f2ca0dbe 5270 igb_vf_reset(adapter, vf);
4ae196df
AD
5271
5272 /* set vf mac address */
26ad9178 5273 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4ae196df
AD
5274
5275 /* enable transmit and receive for vf */
5276 reg = rd32(E1000_VFTE);
5277 wr32(E1000_VFTE, reg | (1 << vf));
5278 reg = rd32(E1000_VFRE);
5279 wr32(E1000_VFRE, reg | (1 << vf));
5280
8fa7e0f7 5281 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
4ae196df
AD
5282
5283 /* reply to reset with ack and vf mac address */
5284 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5285 memcpy(addr, vf_mac, 6);
5286 igb_write_mbx(hw, msgbuf, 3, vf);
5287}
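The reset reply is three 32-bit mailbox words: word 0 carries the message type plus the ACK bit, and the VF's 6-byte MAC address is copied into the bytes starting at word 1 (the same layout igb_set_vf_mac_addr() expects when the VF later sends an address of its own). A stand-alone sketch of that packing, with placeholder values for the message-type constants:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define VF_RESET    0x01u		/* placeholder message type */
#define MSGTYPE_ACK 0x80000000u		/* placeholder ACK flag */

static void pack_reset_reply(uint32_t msgbuf[3], const uint8_t mac[6])
{
	uint8_t *addr = (uint8_t *)&msgbuf[1];	/* MAC lives in words 1..2 */

	memset(msgbuf, 0, 3 * sizeof(uint32_t));
	msgbuf[0] = VF_RESET | MSGTYPE_ACK;
	memcpy(addr, mac, 6);
}

int main(void)
{
	uint32_t msgbuf[3];
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	pack_reset_reply(msgbuf, mac);
	printf("%08x %08x %08x\n", msgbuf[0], msgbuf[1], msgbuf[2]);
	return 0;
}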
5288
5289static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5290{
de42edde
GR
5291 /*
5292 * The VF MAC Address is stored in a packed array of bytes
5293 * starting at the second 32 bit word of the msg array
5294 */
f2ca0dbe
AD
5295 unsigned char *addr = (char *)&msg[1];
5296 int err = -1;
4ae196df 5297
f2ca0dbe
AD
5298 if (is_valid_ether_addr(addr))
5299 err = igb_set_vf_mac(adapter, vf, addr);
4ae196df 5300
f2ca0dbe 5301 return err;
4ae196df
AD
5302}
5303
5304static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5305{
5306 struct e1000_hw *hw = &adapter->hw;
f2ca0dbe 5307 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4ae196df
AD
5308 u32 msg = E1000_VT_MSGTYPE_NACK;
5309
5310 /* if device isn't clear to send it shouldn't be reading either */
f2ca0dbe
AD
5311 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5312 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4ae196df 5313 igb_write_mbx(hw, &msg, 1, vf);
f2ca0dbe 5314 vf_data->last_nack = jiffies;
4ae196df
AD
5315 }
5316}
5317
f2ca0dbe 5318static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4ae196df 5319{
f2ca0dbe
AD
5320 struct pci_dev *pdev = adapter->pdev;
5321 u32 msgbuf[E1000_VFMAILBOX_SIZE];
4ae196df 5322 struct e1000_hw *hw = &adapter->hw;
f2ca0dbe 5323 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4ae196df
AD
5324 s32 retval;
5325
f2ca0dbe 5326 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4ae196df 5327
fef45f4c
AD
5328 if (retval) {
5329 /* if receive failed revoke VF CTS stats and restart init */
f2ca0dbe 5330 dev_err(&pdev->dev, "Error receiving message from VF\n");
fef45f4c
AD
5331 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5332 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5333 return;
5334 goto out;
5335 }
4ae196df
AD
5336
5337 /* this is a message we already processed, do nothing */
5338 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
f2ca0dbe 5339 return;
4ae196df
AD
5340
5341 /*
5342 * until the vf completes a reset it should not be
5343 * allowed to start any configuration.
5344 */
5345
5346 if (msgbuf[0] == E1000_VF_RESET) {
5347 igb_vf_reset_msg(adapter, vf);
f2ca0dbe 5348 return;
4ae196df
AD
5349 }
5350
f2ca0dbe 5351 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
fef45f4c
AD
5352 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5353 return;
5354 retval = -1;
5355 goto out;
4ae196df
AD
5356 }
5357
5358 switch ((msgbuf[0] & 0xFFFF)) {
5359 case E1000_VF_SET_MAC_ADDR:
a6b5ea35
GR
5360 retval = -EINVAL;
5361 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5362 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5363 else
5364 dev_warn(&pdev->dev,
5365 "VF %d attempted to override administratively "
5366 "set MAC address\nReload the VF driver to "
5367 "resume operations\n", vf);
4ae196df 5368 break;
7d5753f0
AD
5369 case E1000_VF_SET_PROMISC:
5370 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5371 break;
4ae196df
AD
5372 case E1000_VF_SET_MULTICAST:
5373 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5374 break;
5375 case E1000_VF_SET_LPE:
5376 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5377 break;
5378 case E1000_VF_SET_VLAN:
a6b5ea35
GR
5379 retval = -1;
5380 if (vf_data->pf_vlan)
5381 dev_warn(&pdev->dev,
5382 "VF %d attempted to override administratively "
5383 "set VLAN tag\nReload the VF driver to "
5384 "resume operations\n", vf);
8151d294
WM
5385 else
5386 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4ae196df
AD
5387 break;
5388 default:
090b1795 5389 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4ae196df
AD
5390 retval = -1;
5391 break;
5392 }
5393
fef45f4c
AD
5394 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5395out:
4ae196df
AD
5396 /* notify the VF of the results of what it sent us */
5397 if (retval)
5398 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5399 else
5400 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5401
4ae196df 5402 igb_write_mbx(hw, msgbuf, 1, vf);
f2ca0dbe 5403}
4ae196df 5404
f2ca0dbe
AD
5405static void igb_msg_task(struct igb_adapter *adapter)
5406{
5407 struct e1000_hw *hw = &adapter->hw;
5408 u32 vf;
5409
5410 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5411 /* process any reset requests */
5412 if (!igb_check_for_rst(hw, vf))
5413 igb_vf_reset_event(adapter, vf);
5414
5415 /* process any messages pending */
5416 if (!igb_check_for_msg(hw, vf))
5417 igb_rcv_msg_from_vf(adapter, vf);
5418
5419 /* process any acks */
5420 if (!igb_check_for_ack(hw, vf))
5421 igb_rcv_ack_from_vf(adapter, vf);
5422 }
4ae196df
AD
5423}
5424
68d480c4
AD
5425/**
5426 * igb_set_uta - Set unicast filter table address
5427 * @adapter: board private structure
5428 *
5429 * The unicast table address is a register array of 32-bit registers.
5430 * The table is meant to be used in a way similar to how the MTA is used;
5431 * however, due to certain limitations in the hardware, it is necessary to
25985edc
LDM
5432 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5433 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
68d480c4
AD
5434 **/
5435static void igb_set_uta(struct igb_adapter *adapter)
5436{
5437 struct e1000_hw *hw = &adapter->hw;
5438 int i;
5439
5440 /* The UTA table only exists on 82576 hardware and newer */
5441 if (hw->mac.type < e1000_82576)
5442 return;
5443
5444 /* we only need to do this if VMDq is enabled */
5445 if (!adapter->vfs_allocated_count)
5446 return;
5447
5448 for (i = 0; i < hw->mac.uta_reg_count; i++)
5449 array_wr32(E1000_UTA, i, ~0);
5450}
5451
9d5c8243
AK
5452/**
5453 * igb_intr_msi - Interrupt Handler
5454 * @irq: interrupt number
5455 * @data: pointer to a network interface device structure
5456 **/
5457static irqreturn_t igb_intr_msi(int irq, void *data)
5458{
047e0030
AD
5459 struct igb_adapter *adapter = data;
5460 struct igb_q_vector *q_vector = adapter->q_vector[0];
9d5c8243
AK
5461 struct e1000_hw *hw = &adapter->hw;
5462 /* read ICR disables interrupts using IAM */
5463 u32 icr = rd32(E1000_ICR);
5464
047e0030 5465 igb_write_itr(q_vector);
9d5c8243 5466
7f081d40
AD
5467 if (icr & E1000_ICR_DRSTA)
5468 schedule_work(&adapter->reset_task);
5469
047e0030 5470 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
5471 /* HW is reporting DMA is out of sync */
5472 adapter->stats.doosync++;
5473 }
5474
9d5c8243
AK
5475 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5476 hw->mac.get_link_status = 1;
5477 if (!test_bit(__IGB_DOWN, &adapter->state))
5478 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5479 }
5480
047e0030 5481 napi_schedule(&q_vector->napi);
9d5c8243
AK
5482
5483 return IRQ_HANDLED;
5484}
5485
5486/**
4a3c6433 5487 * igb_intr - Legacy Interrupt Handler
9d5c8243
AK
5488 * @irq: interrupt number
5489 * @data: pointer to a network interface device structure
5490 **/
5491static irqreturn_t igb_intr(int irq, void *data)
5492{
047e0030
AD
5493 struct igb_adapter *adapter = data;
5494 struct igb_q_vector *q_vector = adapter->q_vector[0];
9d5c8243
AK
5495 struct e1000_hw *hw = &adapter->hw;
5496 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5497 * need for the IMC write */
5498 u32 icr = rd32(E1000_ICR);
9d5c8243
AK
5499
5500 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5501 * not set, then the adapter didn't send an interrupt */
5502 if (!(icr & E1000_ICR_INT_ASSERTED))
5503 return IRQ_NONE;
5504
0ba82994
AD
5505 igb_write_itr(q_vector);
5506
7f081d40
AD
5507 if (icr & E1000_ICR_DRSTA)
5508 schedule_work(&adapter->reset_task);
5509
047e0030 5510 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
5511 /* HW is reporting DMA is out of sync */
5512 adapter->stats.doosync++;
5513 }
5514
9d5c8243
AK
5515 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5516 hw->mac.get_link_status = 1;
5517 /* guard against interrupt when we're going down */
5518 if (!test_bit(__IGB_DOWN, &adapter->state))
5519 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5520 }
5521
047e0030 5522 napi_schedule(&q_vector->napi);
9d5c8243
AK
5523
5524 return IRQ_HANDLED;
5525}
5526
0ba82994 5527void igb_ring_irq_enable(struct igb_q_vector *q_vector)
9d5c8243 5528{
047e0030 5529 struct igb_adapter *adapter = q_vector->adapter;
46544258 5530 struct e1000_hw *hw = &adapter->hw;
9d5c8243 5531
0ba82994
AD
5532 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
5533 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
5534 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
5535 igb_set_itr(q_vector);
46544258 5536 else
047e0030 5537 igb_update_ring_itr(q_vector);
9d5c8243
AK
5538 }
5539
46544258
AD
5540 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5541 if (adapter->msix_entries)
047e0030 5542 wr32(E1000_EIMS, q_vector->eims_value);
46544258
AD
5543 else
5544 igb_irq_enable(adapter);
5545 }
9d5c8243
AK
5546}
5547
46544258
AD
5548/**
5549 * igb_poll - NAPI Rx polling callback
5550 * @napi: napi polling structure
5551 * @budget: count of how many packets we should handle
5552 **/
5553static int igb_poll(struct napi_struct *napi, int budget)
9d5c8243 5554{
047e0030
AD
5555 struct igb_q_vector *q_vector = container_of(napi,
5556 struct igb_q_vector,
5557 napi);
16eb8815 5558 bool clean_complete = true;
9d5c8243 5559
421e02f0 5560#ifdef CONFIG_IGB_DCA
047e0030
AD
5561 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
5562 igb_update_dca(q_vector);
fe4506b6 5563#endif
0ba82994 5564 if (q_vector->tx.ring)
13fde97a 5565 clean_complete = igb_clean_tx_irq(q_vector);
9d5c8243 5566
0ba82994 5567 if (q_vector->rx.ring)
cd392f5c 5568 clean_complete &= igb_clean_rx_irq(q_vector, budget);
047e0030 5569
16eb8815
AD
5570 /* If all work not completed, return budget and keep polling */
5571 if (!clean_complete)
5572 return budget;
46544258 5573
9d5c8243 5574 /* If not enough Rx work done, exit the polling mode */
16eb8815
AD
5575 napi_complete(napi);
5576 igb_ring_irq_enable(q_vector);
9d5c8243 5577
16eb8815 5578 return 0;
9d5c8243 5579}
6d8126f9 5580
33af6bcc 5581/**
c5b9bd5e 5582 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
33af6bcc 5583 * @adapter: board private structure
c5b9bd5e
AD
5584 * @shhwtstamps: timestamp structure to update
5585 * @regval: unsigned 64bit system time value.
5586 *
5587 * We need to convert the system time value stored in the RX/TXSTMP registers
5588 * into a hwtstamp which can be used by the upper level timestamping functions
5589 */
5590static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5591 struct skb_shared_hwtstamps *shhwtstamps,
5592 u64 regval)
5593{
5594 u64 ns;
5595
55cac248
AD
5596 /*
5597 * The 82580 starts with 1 ns at bit 0 in RX/TXSTMPL; shift this up by
5598 * 24 bits to match the clock shift we set up earlier.
5599 */
5600 if (adapter->hw.mac.type == e1000_82580)
5601 regval <<= IGB_82580_TSYNC_SHIFT;
5602
c5b9bd5e
AD
5603 ns = timecounter_cyc2time(&adapter->clock, regval);
5604 timecompare_update(&adapter->compare, ns);
5605 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5606 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5607 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5608}
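For the 82580 the stamp registers count whole nanoseconds starting at bit 0, while the driver's cyclecounter was configured with a fractional shift, so the raw value is shifted left by IGB_82580_TSYNC_SHIFT before being handed to timecounter_cyc2time(). A minimal illustration of combining the low/high halves and applying that alignment; the shift constant is assumed to be 24 and the timecounter call itself is left out:

#include <stdint.h>
#include <stdio.h>

#define TSYNC_SHIFT 24	/* assumed: fractional bits configured in the cyclecounter */

/* combine the low/high stamp registers and align an 82580-style value */
static uint64_t raw_timestamp(uint32_t stmp_lo, uint32_t stmp_hi, int is_82580)
{
	uint64_t regval = stmp_lo | ((uint64_t)stmp_hi << 32);

	if (is_82580)		/* 82580 counts 1 ns at bit 0; shift to match */
		regval <<= TSYNC_SHIFT;
	return regval;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)raw_timestamp(0x1000, 0x2, 0));
	printf("%llu\n", (unsigned long long)raw_timestamp(0x1000, 0x0, 1));
	return 0;
}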
5609
5610/**
5611 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5612 * @q_vector: pointer to q_vector containing needed info
06034649 5613 * @buffer_info: pointer to igb_tx_buffer structure
33af6bcc
PO
5614 *
5615 * If we were asked to do hardware stamping and such a time stamp is
5616 * available, then it must have been for this skb here because we only
5617 * allow one such packet into the queue.
5618 */
06034649
AD
5619static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5620 struct igb_tx_buffer *buffer_info)
33af6bcc 5621{
c5b9bd5e 5622 struct igb_adapter *adapter = q_vector->adapter;
33af6bcc 5623 struct e1000_hw *hw = &adapter->hw;
c5b9bd5e
AD
5624 struct skb_shared_hwtstamps shhwtstamps;
5625 u64 regval;
33af6bcc 5626
c5b9bd5e 5627 /* if skb does not support hw timestamp or TX stamp not valid exit */
2bbfebe2 5628 if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) ||
c5b9bd5e
AD
5629 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
5630 return;
5631
5632 regval = rd32(E1000_TXSTMPL);
5633 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
5634
5635 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
2873957d 5636 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
33af6bcc
PO
5637}
5638
9d5c8243
AK
5639/**
5640 * igb_clean_tx_irq - Reclaim resources after transmit completes
047e0030 5641 * @q_vector: pointer to q_vector containing needed info
9d5c8243
AK
5642 * returns true if ring is completely cleaned
5643 **/
047e0030 5644static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
9d5c8243 5645{
047e0030 5646 struct igb_adapter *adapter = q_vector->adapter;
0ba82994 5647 struct igb_ring *tx_ring = q_vector->tx.ring;
06034649 5648 struct igb_tx_buffer *tx_buffer;
8542db05 5649 union e1000_adv_tx_desc *tx_desc, *eop_desc;
9d5c8243 5650 unsigned int total_bytes = 0, total_packets = 0;
0ba82994 5651 unsigned int budget = q_vector->tx.work_limit;
8542db05 5652 unsigned int i = tx_ring->next_to_clean;
9d5c8243 5653
13fde97a
AD
5654 if (test_bit(__IGB_DOWN, &adapter->state))
5655 return true;
0e014cb1 5656
06034649 5657 tx_buffer = &tx_ring->tx_buffer_info[i];
13fde97a 5658 tx_desc = IGB_TX_DESC(tx_ring, i);
8542db05 5659 i -= tx_ring->count;
9d5c8243 5660
13fde97a 5661 for (; budget; budget--) {
8542db05 5662 eop_desc = tx_buffer->next_to_watch;
13fde97a 5663
8542db05
AD
5664 /* prevent any other reads prior to eop_desc */
5665 rmb();
5666
5667 /* if next_to_watch is not set then there is no work pending */
5668 if (!eop_desc)
5669 break;
13fde97a
AD
5670
5671 /* if DD is not set pending work has not been completed */
5672 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
5673 break;
5674
8542db05
AD
5675 /* clear next_to_watch to prevent false hangs */
5676 tx_buffer->next_to_watch = NULL;
9d5c8243 5677
ebe42d16
AD
5678 /* update the statistics for this packet */
5679 total_bytes += tx_buffer->bytecount;
5680 total_packets += tx_buffer->gso_segs;
13fde97a 5681
ebe42d16
AD
5682 /* retrieve hardware timestamp */
5683 igb_tx_hwtstamp(q_vector, tx_buffer);
5684
5685 /* free the skb */
5686 dev_kfree_skb_any(tx_buffer->skb);
5687 tx_buffer->skb = NULL;
13fde97a 5688
ebe42d16
AD
5689 /* unmap skb header data */
5690 dma_unmap_single(tx_ring->dev,
5691 tx_buffer->dma,
5692 tx_buffer->length,
5693 DMA_TO_DEVICE);
5694
5695 /* clear last DMA location and unmap remaining buffers */
5696 while (tx_desc != eop_desc) {
5697 tx_buffer->dma = 0;
9d5c8243 5698
13fde97a
AD
5699 tx_buffer++;
5700 tx_desc++;
9d5c8243 5701 i++;
8542db05
AD
5702 if (unlikely(!i)) {
5703 i -= tx_ring->count;
06034649 5704 tx_buffer = tx_ring->tx_buffer_info;
13fde97a
AD
5705 tx_desc = IGB_TX_DESC(tx_ring, 0);
5706 }
ebe42d16
AD
5707
5708 /* unmap any remaining paged data */
5709 if (tx_buffer->dma) {
5710 dma_unmap_page(tx_ring->dev,
5711 tx_buffer->dma,
5712 tx_buffer->length,
5713 DMA_TO_DEVICE);
5714 }
5715 }
5716
5717 /* clear last DMA location */
5718 tx_buffer->dma = 0;
5719
5720 /* move us one more past the eop_desc for start of next pkt */
5721 tx_buffer++;
5722 tx_desc++;
5723 i++;
5724 if (unlikely(!i)) {
5725 i -= tx_ring->count;
5726 tx_buffer = tx_ring->tx_buffer_info;
5727 tx_desc = IGB_TX_DESC(tx_ring, 0);
5728 }
0e014cb1
AD
5729 }
5730
8542db05 5731 i += tx_ring->count;
9d5c8243 5732 tx_ring->next_to_clean = i;
13fde97a
AD
5733 u64_stats_update_begin(&tx_ring->tx_syncp);
5734 tx_ring->tx_stats.bytes += total_bytes;
5735 tx_ring->tx_stats.packets += total_packets;
5736 u64_stats_update_end(&tx_ring->tx_syncp);
0ba82994
AD
5737 q_vector->tx.total_bytes += total_bytes;
5738 q_vector->tx.total_packets += total_packets;
9d5c8243 5739
13fde97a
AD
5740 if (tx_ring->detect_tx_hung) {
5741 struct e1000_hw *hw = &adapter->hw;
12dcd86b 5742
8542db05 5743 eop_desc = tx_buffer->next_to_watch;
9d5c8243 5744
9d5c8243
AK
5745 /* Detect a transmit hang in hardware; this serializes the
5746 * check with the clearing of time_stamp and movement of i */
5747 tx_ring->detect_tx_hung = false;
8542db05
AD
5748 if (eop_desc &&
5749 time_after(jiffies, tx_buffer->time_stamp +
8e95a202
JP
5750 (adapter->tx_timeout_factor * HZ)) &&
5751 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
9d5c8243 5752
9d5c8243 5753 /* detected Tx unit hang */
59d71989 5754 dev_err(tx_ring->dev,
9d5c8243 5755 "Detected Tx Unit Hang\n"
2d064c06 5756 " Tx Queue <%d>\n"
9d5c8243
AK
5757 " TDH <%x>\n"
5758 " TDT <%x>\n"
5759 " next_to_use <%x>\n"
5760 " next_to_clean <%x>\n"
9d5c8243
AK
5761 "buffer_info[next_to_clean]\n"
5762 " time_stamp <%lx>\n"
8542db05 5763 " next_to_watch <%p>\n"
9d5c8243
AK
5764 " jiffies <%lx>\n"
5765 " desc.status <%x>\n",
2d064c06 5766 tx_ring->queue_index,
238ac817 5767 rd32(E1000_TDH(tx_ring->reg_idx)),
fce99e34 5768 readl(tx_ring->tail),
9d5c8243
AK
5769 tx_ring->next_to_use,
5770 tx_ring->next_to_clean,
8542db05
AD
5771 tx_buffer->time_stamp,
5772 eop_desc,
9d5c8243 5773 jiffies,
0e014cb1 5774 eop_desc->wb.status);
13fde97a
AD
5775 netif_stop_subqueue(tx_ring->netdev,
5776 tx_ring->queue_index);
5777
5778 /* we are about to reset, no point in enabling stuff */
5779 return true;
9d5c8243
AK
5780 }
5781 }
13fde97a
AD
5782
5783 if (unlikely(total_packets &&
5784 netif_carrier_ok(tx_ring->netdev) &&
5785 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
5786 /* Make sure that anybody stopping the queue after this
5787 * sees the new next_to_clean.
5788 */
5789 smp_mb();
5790 if (__netif_subqueue_stopped(tx_ring->netdev,
5791 tx_ring->queue_index) &&
5792 !(test_bit(__IGB_DOWN, &adapter->state))) {
5793 netif_wake_subqueue(tx_ring->netdev,
5794 tx_ring->queue_index);
5795
5796 u64_stats_update_begin(&tx_ring->tx_syncp);
5797 tx_ring->tx_stats.restart_queue++;
5798 u64_stats_update_end(&tx_ring->tx_syncp);
5799 }
5800 }
5801
5802 return !!budget;
9d5c8243
AK
5803}
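Both the Tx clean loop above and igb_alloc_rx_buffers() further down use the same indexing trick: the ring index is biased by subtracting the ring size up front so it runs from -count toward 0, wrap-around is detected with a cheap !i test instead of a compare-and-modulo, and the real index is recovered by adding the count back at the end. The driver does this with unsigned arithmetic and moving pointers; the sketch below shows just the index math with a signed int over a plain array:

#include <stdio.h>

#define RING_SIZE 4

/* walk 'steps' entries starting at next_to_clean using the biased-index trick */
static unsigned int walk_ring(const int *ring, unsigned int next_to_clean,
			      unsigned int steps)
{
	int i = (int)next_to_clean - RING_SIZE;	/* bias: i runs in [-RING_SIZE, 0) */

	while (steps--) {
		printf("entry %d = %d\n", i + RING_SIZE, ring[i + RING_SIZE]);
		i++;
		if (!i)			/* wrapped past the end of the ring */
			i -= RING_SIZE;
	}
	return (unsigned int)(i + RING_SIZE);	/* un-bias for the caller */
}

int main(void)
{
	int ring[RING_SIZE] = { 10, 11, 12, 13 };

	printf("next_to_clean = %u\n", walk_ring(ring, 2, 3));
	return 0;
}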
5804
cd392f5c
AD
5805static inline void igb_rx_checksum(struct igb_ring *ring,
5806 u32 status_err, struct sk_buff *skb)
9d5c8243 5807{
bc8acf2c 5808 skb_checksum_none_assert(skb);
9d5c8243
AK
5809
5810 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
866cff06 5811 if (!test_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags) ||
85ad76b2 5812 (status_err & E1000_RXD_STAT_IXSM))
9d5c8243 5813 return;
85ad76b2 5814
9d5c8243
AK
5815 /* TCP/UDP checksum error bit is set */
5816 if (status_err &
5817 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
b9473560
JB
5818 /*
5819 * work around errata with sctp packets where the TCPE aka
5820 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
5821 * packets, (aka let the stack check the crc32c)
5822 */
866cff06
AD
5823 if (!((skb->len == 60) &&
5824 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
12dcd86b 5825 u64_stats_update_begin(&ring->rx_syncp);
04a5fcaa 5826 ring->rx_stats.csum_err++;
12dcd86b
ED
5827 u64_stats_update_end(&ring->rx_syncp);
5828 }
9d5c8243 5829 /* let the stack verify checksum errors */
9d5c8243
AK
5830 return;
5831 }
5832 /* It must be a TCP or UDP packet with a valid checksum */
5833 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5834 skb->ip_summed = CHECKSUM_UNNECESSARY;
5835
59d71989 5836 dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
9d5c8243
AK
5837}
5838
757b77e2 5839static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
c5b9bd5e
AD
5840 struct sk_buff *skb)
5841{
5842 struct igb_adapter *adapter = q_vector->adapter;
5843 struct e1000_hw *hw = &adapter->hw;
5844 u64 regval;
5845
5846 /*
5847 * If this bit is set, then the RX registers contain the time stamp. No
5848 * other packet will be time stamped until we read these registers, so
5849 * read the registers to make them available again. Because only one
5850 * packet can be time stamped at a time, we know that the register
5851 * values must belong to this one here and therefore we don't need to
5852 * compare any of the additional attributes stored for it.
5853 *
2244d07b 5854 * If nothing went wrong, then it should have a shared tx_flags that we
c5b9bd5e
AD
5855 * can turn into a skb_shared_hwtstamps.
5856 */
757b77e2
NN
5857 if (staterr & E1000_RXDADV_STAT_TSIP) {
5858 u32 *stamp = (u32 *)skb->data;
5859 regval = le32_to_cpu(*(stamp + 2));
5860 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5861 skb_pull(skb, IGB_TS_HDR_LEN);
5862 } else {
5863 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5864 return;
c5b9bd5e 5865
757b77e2
NN
5866 regval = rd32(E1000_RXSTMPL);
5867 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5868 }
c5b9bd5e
AD
5869
5870 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5871}
44390ca6 5872static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
2d94d8ab
AD
5873{
5874 /* HW will not DMA in data larger than the given buffer, even if it
5875 * parses the (NFS, of course) header to be larger. In that case, it
5876 * fills the header buffer and spills the rest into the page.
5877 */
5878 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5879 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
44390ca6
AD
5880 if (hlen > IGB_RX_HDR_LEN)
5881 hlen = IGB_RX_HDR_LEN;
2d94d8ab
AD
5882 return hlen;
5883}
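igb_get_hlen() extracts the DMA'd header length from the hdr_info field of the advanced Rx descriptor with a mask and a shift, then caps it at the header buffer size. A small sketch of the same extraction; the mask, shift and buffer-size values here are assumptions standing in for the hardware header definitions:

#include <stdint.h>
#include <stdio.h>

#define HDRBUFLEN_MASK  0x7FE0u	/* assumed: 10-bit header-length field */
#define HDRBUFLEN_SHIFT 5	/* assumed: field position inside hdr_info */
#define RX_HDR_LEN      512	/* assumed size of the header buffer */

static uint16_t rx_header_len(uint16_t hdr_info)
{
	uint16_t hlen = (hdr_info & HDRBUFLEN_MASK) >> HDRBUFLEN_SHIFT;

	/* never trust hardware to stay inside the buffer we gave it */
	return hlen > RX_HDR_LEN ? RX_HDR_LEN : hlen;
}

int main(void)
{
	printf("%u\n", rx_header_len(0x0840));	/* 0x42 << 5 -> 66 bytes */
	return 0;
}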
5884
cd392f5c 5885static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
9d5c8243 5886{
0ba82994 5887 struct igb_ring *rx_ring = q_vector->rx.ring;
16eb8815
AD
5888 union e1000_adv_rx_desc *rx_desc;
5889 const int current_node = numa_node_id();
9d5c8243 5890 unsigned int total_bytes = 0, total_packets = 0;
2d94d8ab 5891 u32 staterr;
16eb8815
AD
5892 u16 cleaned_count = igb_desc_unused(rx_ring);
5893 u16 i = rx_ring->next_to_clean;
9d5c8243 5894
60136906 5895 rx_desc = IGB_RX_DESC(rx_ring, i);
9d5c8243
AK
5896 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5897
5898 while (staterr & E1000_RXD_STAT_DD) {
06034649 5899 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
16eb8815
AD
5900 struct sk_buff *skb = buffer_info->skb;
5901 union e1000_adv_rx_desc *next_rxd;
9d5c8243 5902
69d3ca53 5903 buffer_info->skb = NULL;
16eb8815 5904 prefetch(skb->data);
69d3ca53
AD
5905
5906 i++;
5907 if (i == rx_ring->count)
5908 i = 0;
42d0781a 5909
60136906 5910 next_rxd = IGB_RX_DESC(rx_ring, i);
69d3ca53 5911 prefetch(next_rxd);
9d5c8243 5912
16eb8815
AD
5913 /*
5914 * This memory barrier is needed to keep us from reading
5915 * any other fields out of the rx_desc until we know the
5916 * RXD_STAT_DD bit is set
5917 */
5918 rmb();
9d5c8243 5919
16eb8815
AD
5920 if (!skb_is_nonlinear(skb)) {
5921 __skb_put(skb, igb_get_hlen(rx_desc));
5922 dma_unmap_single(rx_ring->dev, buffer_info->dma,
44390ca6 5923 IGB_RX_HDR_LEN,
59d71989 5924 DMA_FROM_DEVICE);
91615f76 5925 buffer_info->dma = 0;
bf36c1a0
AD
5926 }
5927
16eb8815
AD
5928 if (rx_desc->wb.upper.length) {
5929 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
bf36c1a0 5930
aa913403 5931 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
bf36c1a0
AD
5932 buffer_info->page,
5933 buffer_info->page_offset,
5934 length);
5935
16eb8815
AD
5936 skb->len += length;
5937 skb->data_len += length;
5938 skb->truesize += length;
5939
d1eff350
AD
5940 if ((page_count(buffer_info->page) != 1) ||
5941 (page_to_nid(buffer_info->page) != current_node))
bf36c1a0
AD
5942 buffer_info->page = NULL;
5943 else
5944 get_page(buffer_info->page);
9d5c8243 5945
16eb8815
AD
5946 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
5947 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5948 buffer_info->page_dma = 0;
9d5c8243 5949 }
9d5c8243 5950
bf36c1a0 5951 if (!(staterr & E1000_RXD_STAT_EOP)) {
06034649
AD
5952 struct igb_rx_buffer *next_buffer;
5953 next_buffer = &rx_ring->rx_buffer_info[i];
b2d56536
AD
5954 buffer_info->skb = next_buffer->skb;
5955 buffer_info->dma = next_buffer->dma;
5956 next_buffer->skb = skb;
5957 next_buffer->dma = 0;
bf36c1a0
AD
5958 goto next_desc;
5959 }
44390ca6 5960
9d5c8243 5961 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
16eb8815 5962 dev_kfree_skb_any(skb);
9d5c8243
AK
5963 goto next_desc;
5964 }
9d5c8243 5965
757b77e2
NN
5966 if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
5967 igb_rx_hwtstamp(q_vector, staterr, skb);
9d5c8243
AK
5968 total_bytes += skb->len;
5969 total_packets++;
5970
cd392f5c 5971 igb_rx_checksum(rx_ring, staterr, skb);
9d5c8243 5972
16eb8815 5973 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
047e0030 5974
b2cb09b1
JP
5975 if (staterr & E1000_RXD_STAT_VP) {
5976 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
9d5c8243 5977
b2cb09b1
JP
5978 __vlan_hwaccel_put_tag(skb, vid);
5979 }
5980 napi_gro_receive(&q_vector->napi, skb);
9d5c8243 5981
16eb8815 5982 budget--;
9d5c8243 5983next_desc:
16eb8815
AD
5984 if (!budget)
5985 break;
5986
5987 cleaned_count++;
9d5c8243
AK
5988 /* return some buffers to hardware, one at a time is too slow */
5989 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
cd392f5c 5990 igb_alloc_rx_buffers(rx_ring, cleaned_count);
9d5c8243
AK
5991 cleaned_count = 0;
5992 }
5993
5994 /* use prefetched values */
5995 rx_desc = next_rxd;
9d5c8243
AK
5996 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
5997 }
bf36c1a0 5998
9d5c8243 5999 rx_ring->next_to_clean = i;
12dcd86b 6000 u64_stats_update_begin(&rx_ring->rx_syncp);
9d5c8243
AK
6001 rx_ring->rx_stats.packets += total_packets;
6002 rx_ring->rx_stats.bytes += total_bytes;
12dcd86b 6003 u64_stats_update_end(&rx_ring->rx_syncp);
0ba82994
AD
6004 q_vector->rx.total_packets += total_packets;
6005 q_vector->rx.total_bytes += total_bytes;
c023cd88
AD
6006
6007 if (cleaned_count)
cd392f5c 6008 igb_alloc_rx_buffers(rx_ring, cleaned_count);
c023cd88 6009
16eb8815 6010 return !!budget;
9d5c8243
AK
6011}
6012
c023cd88 6013static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
06034649 6014 struct igb_rx_buffer *bi)
c023cd88
AD
6015{
6016 struct sk_buff *skb = bi->skb;
6017 dma_addr_t dma = bi->dma;
6018
6019 if (dma)
6020 return true;
6021
6022 if (likely(!skb)) {
6023 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6024 IGB_RX_HDR_LEN);
6025 bi->skb = skb;
6026 if (!skb) {
6027 rx_ring->rx_stats.alloc_failed++;
6028 return false;
6029 }
6030
6031 /* initialize skb for ring */
6032 skb_record_rx_queue(skb, rx_ring->queue_index);
6033 }
6034
6035 dma = dma_map_single(rx_ring->dev, skb->data,
6036 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6037
6038 if (dma_mapping_error(rx_ring->dev, dma)) {
6039 rx_ring->rx_stats.alloc_failed++;
6040 return false;
6041 }
6042
6043 bi->dma = dma;
6044 return true;
6045}
6046
6047static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
06034649 6048 struct igb_rx_buffer *bi)
c023cd88
AD
6049{
6050 struct page *page = bi->page;
6051 dma_addr_t page_dma = bi->page_dma;
6052 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6053
6054 if (page_dma)
6055 return true;
6056
6057 if (!page) {
6058 page = netdev_alloc_page(rx_ring->netdev);
6059 bi->page = page;
6060 if (unlikely(!page)) {
6061 rx_ring->rx_stats.alloc_failed++;
6062 return false;
6063 }
6064 }
6065
6066 page_dma = dma_map_page(rx_ring->dev, page,
6067 page_offset, PAGE_SIZE / 2,
6068 DMA_FROM_DEVICE);
6069
6070 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6071 rx_ring->rx_stats.alloc_failed++;
6072 return false;
6073 }
6074
6075 bi->page_dma = page_dma;
6076 bi->page_offset = page_offset;
6077 return true;
6078}
6079
9d5c8243 6080/**
cd392f5c 6081 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
9d5c8243
AK
6082 * @rx_ring: rx descriptor ring to allocate new receive buffers for
6083 **/
cd392f5c 6084void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
9d5c8243 6085{
9d5c8243 6086 union e1000_adv_rx_desc *rx_desc;
06034649 6087 struct igb_rx_buffer *bi;
c023cd88 6088 u16 i = rx_ring->next_to_use;
9d5c8243 6089
60136906 6090 rx_desc = IGB_RX_DESC(rx_ring, i);
06034649 6091 bi = &rx_ring->rx_buffer_info[i];
c023cd88 6092 i -= rx_ring->count;
9d5c8243
AK
6093
6094 while (cleaned_count--) {
c023cd88
AD
6095 if (!igb_alloc_mapped_skb(rx_ring, bi))
6096 break;
9d5c8243 6097
c023cd88
AD
6098 /* Refresh the desc even if buffer_addrs didn't change
6099 * because each write-back erases this info. */
6100 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
9d5c8243 6101
c023cd88
AD
6102 if (!igb_alloc_mapped_page(rx_ring, bi))
6103 break;
6104
6105 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
9d5c8243 6106
c023cd88
AD
6107 rx_desc++;
6108 bi++;
9d5c8243 6109 i++;
c023cd88 6110 if (unlikely(!i)) {
60136906 6111 rx_desc = IGB_RX_DESC(rx_ring, 0);
06034649 6112 bi = rx_ring->rx_buffer_info;
c023cd88
AD
6113 i -= rx_ring->count;
6114 }
6115
6116 /* clear the hdr_addr for the next_to_use descriptor */
6117 rx_desc->read.hdr_addr = 0;
9d5c8243
AK
6118 }
6119
c023cd88
AD
6120 i += rx_ring->count;
6121
9d5c8243
AK
6122 if (rx_ring->next_to_use != i) {
6123 rx_ring->next_to_use = i;
9d5c8243
AK
6124
6125 /* Force memory writes to complete before letting h/w
6126 * know there are new descriptors to fetch. (Only
6127 * applicable for weak-ordered memory model archs,
6128 * such as IA-64). */
6129 wmb();
fce99e34 6130 writel(i, rx_ring->tail);
9d5c8243
AK
6131 }
6132}
6133
6134/**
6135 * igb_mii_ioctl -
6136 * @netdev:
6137 * @ifreq:
6138 * @cmd:
6139 **/
6140static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6141{
6142 struct igb_adapter *adapter = netdev_priv(netdev);
6143 struct mii_ioctl_data *data = if_mii(ifr);
6144
6145 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6146 return -EOPNOTSUPP;
6147
6148 switch (cmd) {
6149 case SIOCGMIIPHY:
6150 data->phy_id = adapter->hw.phy.addr;
6151 break;
6152 case SIOCGMIIREG:
f5f4cf08
AD
6153 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6154 &data->val_out))
9d5c8243
AK
6155 return -EIO;
6156 break;
6157 case SIOCSMIIREG:
6158 default:
6159 return -EOPNOTSUPP;
6160 }
6161 return 0;
6162}
6163
c6cb090b
PO
6164/**
6165 * igb_hwtstamp_ioctl - control hardware time stamping
6166 * @netdev:
6167 * @ifreq:
6168 * @cmd:
6169 *
33af6bcc
PO
6170 * Outgoing time stamping can be enabled and disabled. Play nice and
6171 * disable it when requested, although it shouldn't cause any overhead
6172 * when no packet needs it. At most one packet in the queue may be
6173 * marked for time stamping, otherwise it would be impossible to tell
6174 * for sure to which packet the hardware time stamp belongs.
6175 *
6176 * Incoming time stamping has to be configured via the hardware
6177 * filters. Not all combinations are supported, in particular event
6178 * type has to be specified. Matching the kind of event packet is
6179 * not supported, with the exception of "all V2 events regardless of
6180 * level 2 or 4".
6181 *
c6cb090b
PO
6182 **/
6183static int igb_hwtstamp_ioctl(struct net_device *netdev,
6184 struct ifreq *ifr, int cmd)
6185{
33af6bcc
PO
6186 struct igb_adapter *adapter = netdev_priv(netdev);
6187 struct e1000_hw *hw = &adapter->hw;
c6cb090b 6188 struct hwtstamp_config config;
c5b9bd5e
AD
6189 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
6190 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
33af6bcc 6191 u32 tsync_rx_cfg = 0;
c5b9bd5e
AD
6192 bool is_l4 = false;
6193 bool is_l2 = false;
33af6bcc 6194 u32 regval;
c6cb090b
PO
6195
6196 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
6197 return -EFAULT;
6198
6199 /* reserved for future extensions */
6200 if (config.flags)
6201 return -EINVAL;
6202
33af6bcc
PO
6203 switch (config.tx_type) {
6204 case HWTSTAMP_TX_OFF:
c5b9bd5e 6205 tsync_tx_ctl = 0; /* fall through */
33af6bcc 6206 case HWTSTAMP_TX_ON:
33af6bcc
PO
6207 break;
6208 default:
6209 return -ERANGE;
6210 }
6211
6212 switch (config.rx_filter) {
6213 case HWTSTAMP_FILTER_NONE:
c5b9bd5e 6214 tsync_rx_ctl = 0;
33af6bcc
PO
6215 break;
6216 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
6217 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
6218 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
6219 case HWTSTAMP_FILTER_ALL:
6220 /*
6221 * register TSYNCRXCFG must be set, therefore it is not
6222 * possible to time stamp both Sync and Delay_Req messages
6223 * => fall back to time stamping all packets
6224 */
c5b9bd5e 6225 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
33af6bcc
PO
6226 config.rx_filter = HWTSTAMP_FILTER_ALL;
6227 break;
6228 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
c5b9bd5e 6229 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
33af6bcc 6230 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
c5b9bd5e 6231 is_l4 = true;
33af6bcc
PO
6232 break;
6233 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
c5b9bd5e 6234 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
33af6bcc 6235 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
c5b9bd5e 6236 is_l4 = true;
33af6bcc
PO
6237 break;
6238 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
6239 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
c5b9bd5e 6240 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
33af6bcc 6241 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
c5b9bd5e
AD
6242 is_l2 = true;
6243 is_l4 = true;
33af6bcc
PO
6244 config.rx_filter = HWTSTAMP_FILTER_SOME;
6245 break;
6246 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
6247 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
c5b9bd5e 6248 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
33af6bcc 6249 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
c5b9bd5e
AD
6250 is_l2 = true;
6251 is_l4 = true;
33af6bcc
PO
6252 config.rx_filter = HWTSTAMP_FILTER_SOME;
6253 break;
6254 case HWTSTAMP_FILTER_PTP_V2_EVENT:
6255 case HWTSTAMP_FILTER_PTP_V2_SYNC:
6256 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
c5b9bd5e 6257 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
33af6bcc 6258 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
c5b9bd5e 6259 is_l2 = true;
33af6bcc
PO
6260 break;
6261 default:
6262 return -ERANGE;
6263 }
6264
c5b9bd5e
AD
6265 if (hw->mac.type == e1000_82575) {
6266 if (tsync_rx_ctl | tsync_tx_ctl)
6267 return -EINVAL;
6268 return 0;
6269 }
6270
757b77e2
NN
6271 /*
6272 * Per-packet timestamping only works if all packets are
6273 * timestamped, so enable timestamping in all packets as
6274 * long as one rx filter was configured.
6275 */
6276 if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
6277 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
6278 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
6279 }
6280
33af6bcc
PO
6281 /* enable/disable TX */
6282 regval = rd32(E1000_TSYNCTXCTL);
c5b9bd5e
AD
6283 regval &= ~E1000_TSYNCTXCTL_ENABLED;
6284 regval |= tsync_tx_ctl;
33af6bcc
PO
6285 wr32(E1000_TSYNCTXCTL, regval);
6286
c5b9bd5e 6287 /* enable/disable RX */
33af6bcc 6288 regval = rd32(E1000_TSYNCRXCTL);
c5b9bd5e
AD
6289 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
6290 regval |= tsync_rx_ctl;
33af6bcc 6291 wr32(E1000_TSYNCRXCTL, regval);
33af6bcc 6292
c5b9bd5e
AD
6293 /* define which PTP packets are time stamped */
6294 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
33af6bcc 6295
c5b9bd5e
AD
6296 /* define ethertype filter for timestamped packets */
6297 if (is_l2)
6298 wr32(E1000_ETQF(3),
6299 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
6300 E1000_ETQF_1588 | /* enable timestamping */
6301 ETH_P_1588)); /* 1588 eth protocol type */
6302 else
6303 wr32(E1000_ETQF(3), 0);
6304
6305#define PTP_PORT 319
6306 /* L4 Queue Filter[3]: filter by destination port and protocol */
6307 if (is_l4) {
6308 u32 ftqf = (IPPROTO_UDP /* UDP */
6309 | E1000_FTQF_VF_BP /* VF not compared */
6310 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
6311 | E1000_FTQF_MASK); /* mask all inputs */
6312 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
6313
6314 wr32(E1000_IMIR(3), htons(PTP_PORT));
6315 wr32(E1000_IMIREXT(3),
6316 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
6317 if (hw->mac.type == e1000_82576) {
6318 /* enable source port check */
6319 wr32(E1000_SPQF(3), htons(PTP_PORT));
6320 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
6321 }
6322 wr32(E1000_FTQF(3), ftqf);
6323 } else {
6324 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
6325 }
33af6bcc
PO
6326 wrfl();
6327
6328 adapter->hwtstamp_config = config;
6329
6330 /* clear TX/RX time stamp registers, just to be sure */
6331 regval = rd32(E1000_TXSTMPH);
6332 regval = rd32(E1000_RXSTMPH);
c6cb090b 6333
33af6bcc
PO
6334 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
6335 -EFAULT : 0;
c6cb090b
PO
6336}
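From user space this handler is reached through the SIOCSHWTSTAMP ioctl with a struct hwtstamp_config hung off the ifreq; the driver copies the (possibly upgraded) configuration back on success. A minimal example requesting Tx timestamping and the catch-all Rx filter, with the interface name and error handling kept deliberately simple:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* stamp outgoing PTP packets */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* driver may adjust what it grants */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter granted: %d\n", cfg.rx_filter);
	return 0;
}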
6337
9d5c8243
AK
6338/**
6339 * igb_ioctl -
6340 * @netdev:
6341 * @ifreq:
6342 * @cmd:
6343 **/
6344static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6345{
6346 switch (cmd) {
6347 case SIOCGMIIPHY:
6348 case SIOCGMIIREG:
6349 case SIOCSMIIREG:
6350 return igb_mii_ioctl(netdev, ifr, cmd);
c6cb090b
PO
6351 case SIOCSHWTSTAMP:
6352 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
9d5c8243
AK
6353 default:
6354 return -EOPNOTSUPP;
6355 }
6356}
6357
009bc06e
AD
6358s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6359{
6360 struct igb_adapter *adapter = hw->back;
6361 u16 cap_offset;
6362
bdaae04c 6363 cap_offset = adapter->pdev->pcie_cap;
009bc06e
AD
6364 if (!cap_offset)
6365 return -E1000_ERR_CONFIG;
6366
6367 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6368
6369 return 0;
6370}
6371
6372s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6373{
6374 struct igb_adapter *adapter = hw->back;
6375 u16 cap_offset;
6376
bdaae04c 6377 cap_offset = adapter->pdev->pcie_cap;
009bc06e
AD
6378 if (!cap_offset)
6379 return -E1000_ERR_CONFIG;
6380
6381 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6382
6383 return 0;
6384}
6385
b2cb09b1 6386static void igb_vlan_mode(struct net_device *netdev, u32 features)
9d5c8243
AK
6387{
6388 struct igb_adapter *adapter = netdev_priv(netdev);
6389 struct e1000_hw *hw = &adapter->hw;
6390 u32 ctrl, rctl;
6391
6392 igb_irq_disable(adapter);
9d5c8243 6393
b2cb09b1 6394 if (features & NETIF_F_HW_VLAN_RX) {
9d5c8243
AK
6395 /* enable VLAN tag insert/strip */
6396 ctrl = rd32(E1000_CTRL);
6397 ctrl |= E1000_CTRL_VME;
6398 wr32(E1000_CTRL, ctrl);
6399
51466239 6400 /* Disable CFI check */
9d5c8243 6401 rctl = rd32(E1000_RCTL);
9d5c8243
AK
6402 rctl &= ~E1000_RCTL_CFIEN;
6403 wr32(E1000_RCTL, rctl);
9d5c8243
AK
6404 } else {
6405 /* disable VLAN tag insert/strip */
6406 ctrl = rd32(E1000_CTRL);
6407 ctrl &= ~E1000_CTRL_VME;
6408 wr32(E1000_CTRL, ctrl);
9d5c8243
AK
6409 }
6410
e1739522
AD
6411 igb_rlpml_set(adapter);
6412
9d5c8243
AK
6413 if (!test_bit(__IGB_DOWN, &adapter->state))
6414 igb_irq_enable(adapter);
6415}
6416
6417static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
6418{
6419 struct igb_adapter *adapter = netdev_priv(netdev);
6420 struct e1000_hw *hw = &adapter->hw;
4ae196df 6421 int pf_id = adapter->vfs_allocated_count;
9d5c8243 6422
51466239
AD
6423 /* attempt to add filter to vlvf array */
6424 igb_vlvf_set(adapter, vid, true, pf_id);
4ae196df 6425
51466239
AD
6426 /* add the filter since PF can receive vlans w/o entry in vlvf */
6427 igb_vfta_set(hw, vid, true);
b2cb09b1
JP
6428
6429 set_bit(vid, adapter->active_vlans);
9d5c8243
AK
6430}
6431
6432static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
6433{
6434 struct igb_adapter *adapter = netdev_priv(netdev);
6435 struct e1000_hw *hw = &adapter->hw;
4ae196df 6436 int pf_id = adapter->vfs_allocated_count;
51466239 6437 s32 err;
9d5c8243
AK
6438
6439 igb_irq_disable(adapter);
9d5c8243
AK
6440
6441 if (!test_bit(__IGB_DOWN, &adapter->state))
6442 igb_irq_enable(adapter);
6443
51466239
AD
6444 /* remove vlan from VLVF table array */
6445 err = igb_vlvf_set(adapter, vid, false, pf_id);
9d5c8243 6446
51466239
AD
6447 /* if vid was not present in VLVF just remove it from table */
6448 if (err)
4ae196df 6449 igb_vfta_set(hw, vid, false);
b2cb09b1
JP
6450
6451 clear_bit(vid, adapter->active_vlans);
9d5c8243
AK
6452}
6453
6454static void igb_restore_vlan(struct igb_adapter *adapter)
6455{
b2cb09b1 6456 u16 vid;
9d5c8243 6457
b2cb09b1
JP
6458 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6459 igb_vlan_rx_add_vid(adapter->netdev, vid);
9d5c8243
AK
6460}
6461
14ad2513 6462int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
9d5c8243 6463{
090b1795 6464 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
6465 struct e1000_mac_info *mac = &adapter->hw.mac;
6466
6467 mac->autoneg = 0;
6468
14ad2513
DD
6469 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6470 * for the switch() below to work */
6471 if ((spd & 1) || (dplx & ~1))
6472 goto err_inval;
6473
cd2638a8
CW
6474 /* Fiber NICs only allow 1000 Mbps full duplex */
6475 if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
14ad2513
DD
6476 (spd != SPEED_1000 ||
6477 dplx != DUPLEX_FULL))
6478 goto err_inval;
cd2638a8 6479
14ad2513 6480 switch (spd + dplx) {
9d5c8243
AK
6481 case SPEED_10 + DUPLEX_HALF:
6482 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6483 break;
6484 case SPEED_10 + DUPLEX_FULL:
6485 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6486 break;
6487 case SPEED_100 + DUPLEX_HALF:
6488 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6489 break;
6490 case SPEED_100 + DUPLEX_FULL:
6491 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6492 break;
6493 case SPEED_1000 + DUPLEX_FULL:
6494 mac->autoneg = 1;
6495 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6496 break;
6497 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6498 default:
14ad2513 6499 goto err_inval;
9d5c8243
AK
6500 }
6501 return 0;
14ad2513
DD
6502
6503err_inval:
6504 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6505 return -EINVAL;
9d5c8243
AK
6506}
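The switch on spd + dplx works because the standard ethtool values make the sums unique: SPEED_10/100/1000 are 10, 100 and 1000, DUPLEX_HALF is 0 and DUPLEX_FULL is 1, and the check at the top rejects anything with more than one duplex bit or the speed's least-significant bit set, so no two valid combinations can collide. A tiny check of that property:

#include <stdio.h>

/* standard ethtool speed/duplex values (duplicated here for the demo) */
enum { SPEED_10 = 10, SPEED_100 = 100, SPEED_1000 = 1000 };
enum { DUPLEX_HALF = 0, DUPLEX_FULL = 1 };

int main(void)
{
	const int speeds[]   = { SPEED_10, SPEED_100, SPEED_1000 };
	const int duplexes[] = { DUPLEX_HALF, DUPLEX_FULL };
	int s, d;

	for (s = 0; s < 3; s++)
		for (d = 0; d < 2; d++)
			printf("speed %4d + duplex %d -> key %d\n",
			       speeds[s], duplexes[d], speeds[s] + duplexes[d]);
	return 0;
}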
6507
3fe7c4c9 6508static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
9d5c8243
AK
6509{
6510 struct net_device *netdev = pci_get_drvdata(pdev);
6511 struct igb_adapter *adapter = netdev_priv(netdev);
6512 struct e1000_hw *hw = &adapter->hw;
2d064c06 6513 u32 ctrl, rctl, status;
9d5c8243
AK
6514 u32 wufc = adapter->wol;
6515#ifdef CONFIG_PM
6516 int retval = 0;
6517#endif
6518
6519 netif_device_detach(netdev);
6520
a88f10ec
AD
6521 if (netif_running(netdev))
6522 igb_close(netdev);
6523
047e0030 6524 igb_clear_interrupt_scheme(adapter);
9d5c8243
AK
6525
6526#ifdef CONFIG_PM
6527 retval = pci_save_state(pdev);
6528 if (retval)
6529 return retval;
6530#endif
6531
6532 status = rd32(E1000_STATUS);
6533 if (status & E1000_STATUS_LU)
6534 wufc &= ~E1000_WUFC_LNKC;
6535
6536 if (wufc) {
6537 igb_setup_rctl(adapter);
ff41f8dc 6538 igb_set_rx_mode(netdev);
9d5c8243
AK
6539
6540 /* turn on all-multi mode if wake on multicast is enabled */
6541 if (wufc & E1000_WUFC_MC) {
6542 rctl = rd32(E1000_RCTL);
6543 rctl |= E1000_RCTL_MPE;
6544 wr32(E1000_RCTL, rctl);
6545 }
6546
6547 ctrl = rd32(E1000_CTRL);
6548 /* advertise wake from D3Cold */
6549 #define E1000_CTRL_ADVD3WUC 0x00100000
6550 /* phy power management enable */
6551 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6552 ctrl |= E1000_CTRL_ADVD3WUC;
6553 wr32(E1000_CTRL, ctrl);
6554
9d5c8243 6555 /* Allow time for pending master requests to run */
330a6d6a 6556 igb_disable_pcie_master(hw);
9d5c8243
AK
6557
6558 wr32(E1000_WUC, E1000_WUC_PME_EN);
6559 wr32(E1000_WUFC, wufc);
9d5c8243
AK
6560 } else {
6561 wr32(E1000_WUC, 0);
6562 wr32(E1000_WUFC, 0);
9d5c8243
AK
6563 }
6564
3fe7c4c9
RW
6565 *enable_wake = wufc || adapter->en_mng_pt;
6566 if (!*enable_wake)
88a268c1
NN
6567 igb_power_down_link(adapter);
6568 else
6569 igb_power_up_link(adapter);
9d5c8243
AK
6570
6571 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6572 * would have already happened in close and is redundant. */
6573 igb_release_hw_control(adapter);
6574
6575 pci_disable_device(pdev);
6576
9d5c8243
AK
6577 return 0;
6578}
6579
6580#ifdef CONFIG_PM
3fe7c4c9
RW
6581static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
6582{
6583 int retval;
6584 bool wake;
6585
6586 retval = __igb_shutdown(pdev, &wake);
6587 if (retval)
6588 return retval;
6589
6590 if (wake) {
6591 pci_prepare_to_sleep(pdev);
6592 } else {
6593 pci_wake_from_d3(pdev, false);
6594 pci_set_power_state(pdev, PCI_D3hot);
6595 }
6596
6597 return 0;
6598}
6599
9d5c8243
AK
6600static int igb_resume(struct pci_dev *pdev)
6601{
6602 struct net_device *netdev = pci_get_drvdata(pdev);
6603 struct igb_adapter *adapter = netdev_priv(netdev);
6604 struct e1000_hw *hw = &adapter->hw;
6605 u32 err;
6606
6607 pci_set_power_state(pdev, PCI_D0);
6608 pci_restore_state(pdev);
b94f2d77 6609 pci_save_state(pdev);
42bfd33a 6610
aed5dec3 6611 err = pci_enable_device_mem(pdev);
9d5c8243
AK
6612 if (err) {
6613 dev_err(&pdev->dev,
6614 "igb: Cannot enable PCI device from suspend\n");
6615 return err;
6616 }
6617 pci_set_master(pdev);
6618
6619 pci_enable_wake(pdev, PCI_D3hot, 0);
6620 pci_enable_wake(pdev, PCI_D3cold, 0);
6621
047e0030 6622 if (igb_init_interrupt_scheme(adapter)) {
a88f10ec
AD
6623 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6624 return -ENOMEM;
9d5c8243
AK
6625 }
6626
9d5c8243 6627 igb_reset(adapter);
a8564f03
AD
6628
6629 /* let the f/w know that the h/w is now under the control of the
6630 * driver. */
6631 igb_get_hw_control(adapter);
6632
9d5c8243
AK
6633 wr32(E1000_WUS, ~0);
6634
a88f10ec
AD
6635 if (netif_running(netdev)) {
6636 err = igb_open(netdev);
6637 if (err)
6638 return err;
6639 }
9d5c8243
AK
6640
6641 netif_device_attach(netdev);
6642
9d5c8243
AK
6643 return 0;
6644}
6645#endif
6646
6647static void igb_shutdown(struct pci_dev *pdev)
6648{
3fe7c4c9
RW
6649 bool wake;
6650
6651 __igb_shutdown(pdev, &wake);
6652
6653 if (system_state == SYSTEM_POWER_OFF) {
6654 pci_wake_from_d3(pdev, wake);
6655 pci_set_power_state(pdev, PCI_D3hot);
6656 }
9d5c8243
AK
6657}
6658
6659#ifdef CONFIG_NET_POLL_CONTROLLER
6660/*
6661 * Polling 'interrupt' - used by things like netconsole to send skbs
6662 * without having to re-enable interrupts. It's not called while
6663 * the interrupt routine is executing.
6664 */
6665static void igb_netpoll(struct net_device *netdev)
6666{
6667 struct igb_adapter *adapter = netdev_priv(netdev);
eebbbdba 6668 struct e1000_hw *hw = &adapter->hw;
9d5c8243 6669 int i;
9d5c8243 6670
eebbbdba 6671 if (!adapter->msix_entries) {
047e0030 6672 struct igb_q_vector *q_vector = adapter->q_vector[0];
eebbbdba 6673 igb_irq_disable(adapter);
047e0030 6674 napi_schedule(&q_vector->napi);
eebbbdba
AD
6675 return;
6676 }
9d5c8243 6677
047e0030
AD
6678 for (i = 0; i < adapter->num_q_vectors; i++) {
6679 struct igb_q_vector *q_vector = adapter->q_vector[i];
6680 wr32(E1000_EIMC, q_vector->eims_value);
6681 napi_schedule(&q_vector->napi);
eebbbdba 6682 }
9d5c8243
AK
6683}
6684#endif /* CONFIG_NET_POLL_CONTROLLER */
6685
6686/**
6687 * igb_io_error_detected - called when PCI error is detected
6688 * @pdev: Pointer to PCI device
6689 * @state: The current pci connection state
6690 *
6691 * This function is called after a PCI bus error affecting
6692 * this device has been detected.
6693 */
6694static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6695 pci_channel_state_t state)
6696{
6697 struct net_device *netdev = pci_get_drvdata(pdev);
6698 struct igb_adapter *adapter = netdev_priv(netdev);
6699
6700 netif_device_detach(netdev);
6701
59ed6eec
AD
6702 if (state == pci_channel_io_perm_failure)
6703 return PCI_ERS_RESULT_DISCONNECT;
6704
9d5c8243
AK
6705 if (netif_running(netdev))
6706 igb_down(adapter);
6707 pci_disable_device(pdev);
6708
6709	/* Request a slot reset. */
6710 return PCI_ERS_RESULT_NEED_RESET;
6711}
6712
6713/**
6714 * igb_io_slot_reset - called after the pci bus has been reset.
6715 * @pdev: Pointer to PCI device
6716 *
6717 * Restart the card from scratch, as if from a cold boot. Implementation
6718 * resembles the first half of the igb_resume routine.
6719 */
6720static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6721{
6722 struct net_device *netdev = pci_get_drvdata(pdev);
6723 struct igb_adapter *adapter = netdev_priv(netdev);
6724 struct e1000_hw *hw = &adapter->hw;
40a914fa 6725 pci_ers_result_t result;
42bfd33a 6726 int err;
9d5c8243 6727
aed5dec3 6728 if (pci_enable_device_mem(pdev)) {
9d5c8243
AK
6729 dev_err(&pdev->dev,
6730 "Cannot re-enable PCI device after reset.\n");
40a914fa
AD
6731 result = PCI_ERS_RESULT_DISCONNECT;
6732 } else {
6733 pci_set_master(pdev);
6734 pci_restore_state(pdev);
b94f2d77 6735 pci_save_state(pdev);
9d5c8243 6736
40a914fa
AD
6737 pci_enable_wake(pdev, PCI_D3hot, 0);
6738 pci_enable_wake(pdev, PCI_D3cold, 0);
9d5c8243 6739
40a914fa
AD
6740 igb_reset(adapter);
6741 wr32(E1000_WUS, ~0);
6742 result = PCI_ERS_RESULT_RECOVERED;
6743 }
9d5c8243 6744
ea943d41
JK
6745 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6746 if (err) {
6747 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6748 "failed 0x%0x\n", err);
6749 /* non-fatal, continue */
6750 }
40a914fa
AD
6751
6752 return result;
9d5c8243
AK
6753}
6754
6755/**
6756 * igb_io_resume - called when traffic can start flowing again.
6757 * @pdev: Pointer to PCI device
6758 *
6759 * This callback is called when the error recovery driver tells us that
6760 * it's OK to resume normal operation. Implementation resembles the
6761 * second half of the igb_resume routine.
6762 */
6763static void igb_io_resume(struct pci_dev *pdev)
6764{
6765 struct net_device *netdev = pci_get_drvdata(pdev);
6766 struct igb_adapter *adapter = netdev_priv(netdev);
6767
9d5c8243
AK
6768 if (netif_running(netdev)) {
6769 if (igb_up(adapter)) {
6770 dev_err(&pdev->dev, "igb_up failed after reset\n");
6771 return;
6772 }
6773 }
6774
6775 netif_device_attach(netdev);
6776
6777 /* let the f/w know that the h/w is now under the control of the
6778 * driver. */
6779 igb_get_hw_control(adapter);
9d5c8243
AK
6780}
6781
26ad9178
AD
6782static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6783 u8 qsel)
6784{
6785 u32 rar_low, rar_high;
6786 struct e1000_hw *hw = &adapter->hw;
6787
6788 /* HW expects these in little endian so we reverse the byte order
6789 * from network order (big endian) to little endian
6790 */
6791 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6792 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6793 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
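	/* Illustrative example: a MAC address of 00:11:22:33:44:55 packs as
	 * rar_low = 0x33221100 and rar_high = 0x00005544, before the valid
	 * and pool bits are OR'd in below. */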
6794
6795 /* Indicate to hardware the Address is Valid. */
6796 rar_high |= E1000_RAH_AV;
6797
6798 if (hw->mac.type == e1000_82575)
6799 rar_high |= E1000_RAH_POOL_1 * qsel;
6800 else
6801 rar_high |= E1000_RAH_POOL_1 << qsel;
6802
6803 wr32(E1000_RAL(index), rar_low);
6804 wrfl();
6805 wr32(E1000_RAH(index), rar_high);
6806 wrfl();
6807}
6808
4ae196df
AD
6809static int igb_set_vf_mac(struct igb_adapter *adapter,
6810 int vf, unsigned char *mac_addr)
6811{
6812 struct e1000_hw *hw = &adapter->hw;
ff41f8dc
AD
6813 /* VF MAC addresses start at the end of the receive addresses and move
6814 * towards the first, so a collision should not be possible */
6815 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
4ae196df 6816
37680117 6817 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
4ae196df 6818
26ad9178 6819 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
4ae196df
AD
6820
6821 return 0;
6822}
6823
8151d294
WM
6824static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6825{
6826 struct igb_adapter *adapter = netdev_priv(netdev);
6827 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6828 return -EINVAL;
6829 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6830 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6831 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6832 " change effective.\n");
6833 if (test_bit(__IGB_DOWN, &adapter->state)) {
6834 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6835 " but the PF device is not up.\n");
6836 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6837 " attempting to use the VF device.\n");
6838 }
6839 return igb_set_vf_mac(adapter, vf, mac);
6840}
6841
17dc566c
LL
6842static int igb_link_mbps(int internal_link_speed)
6843{
6844 switch (internal_link_speed) {
6845 case SPEED_100:
6846 return 100;
6847 case SPEED_1000:
6848 return 1000;
6849 default:
6850 return 0;
6851 }
6852}
6853
6854static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6855 int link_speed)
6856{
6857 int rf_dec, rf_int;
6858 u32 bcnrc_val;
6859
6860 if (tx_rate != 0) {
6861 /* Calculate the rate factor values to set */
6862 rf_int = link_speed / tx_rate;
6863 rf_dec = (link_speed - (rf_int * tx_rate));
6864 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
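		/* Illustrative example: link_speed = 1000 and tx_rate = 300
		 * give rf_int = 3, with rf_dec encoding the remaining
		 * 100/300 (~0.33) as the fixed-point fraction, i.e. an
		 * overall rate factor of link_speed / tx_rate. */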
6865
6866 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6867 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6868 E1000_RTTBCNRC_RF_INT_MASK);
6869 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6870 } else {
6871 bcnrc_val = 0;
6872 }
6873
6874 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
6875 wr32(E1000_RTTBCNRC, bcnrc_val);
6876}
6877
6878static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6879{
6880 int actual_link_speed, i;
6881 bool reset_rate = false;
6882
6883 /* VF TX rate limit was not set or not supported */
6884 if ((adapter->vf_rate_link_speed == 0) ||
6885 (adapter->hw.mac.type != e1000_82576))
6886 return;
6887
6888 actual_link_speed = igb_link_mbps(adapter->link_speed);
6889 if (actual_link_speed != adapter->vf_rate_link_speed) {
6890 reset_rate = true;
6891 adapter->vf_rate_link_speed = 0;
6892 dev_info(&adapter->pdev->dev,
6893 "Link speed has been changed. VF Transmit "
6894 "rate is disabled\n");
6895 }
6896
6897 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6898 if (reset_rate)
6899 adapter->vf_data[i].tx_rate = 0;
6900
6901 igb_set_vf_rate_limit(&adapter->hw, i,
6902 adapter->vf_data[i].tx_rate,
6903 actual_link_speed);
6904 }
6905}
6906
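/* Typically reached through the rtnetlink VF attributes, e.g.
 * "ip link set <pf> vf <n> rate <mbps>" from iproute2 (illustrative
 * command; the device name and VF number are placeholders). */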
8151d294
WM
6907static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6908{
17dc566c
LL
6909 struct igb_adapter *adapter = netdev_priv(netdev);
6910 struct e1000_hw *hw = &adapter->hw;
6911 int actual_link_speed;
6912
6913 if (hw->mac.type != e1000_82576)
6914 return -EOPNOTSUPP;
6915
6916 actual_link_speed = igb_link_mbps(adapter->link_speed);
6917 if ((vf >= adapter->vfs_allocated_count) ||
6918 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6919 (tx_rate < 0) || (tx_rate > actual_link_speed))
6920 return -EINVAL;
6921
6922 adapter->vf_rate_link_speed = actual_link_speed;
6923 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6924 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6925
6926 return 0;
8151d294
WM
6927}
6928
6929static int igb_ndo_get_vf_config(struct net_device *netdev,
6930 int vf, struct ifla_vf_info *ivi)
6931{
6932 struct igb_adapter *adapter = netdev_priv(netdev);
6933 if (vf >= adapter->vfs_allocated_count)
6934 return -EINVAL;
6935 ivi->vf = vf;
6936 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
17dc566c 6937 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
8151d294
WM
6938 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6939 ivi->qos = adapter->vf_data[vf].pf_qos;
6940 return 0;
6941}
6942
4ae196df
AD
6943static void igb_vmm_control(struct igb_adapter *adapter)
6944{
6945 struct e1000_hw *hw = &adapter->hw;
10d8e907 6946 u32 reg;
4ae196df 6947
52a1dd4d
AD
6948 switch (hw->mac.type) {
6949 case e1000_82575:
6950 default:
6951 /* replication is not supported for 82575 */
4ae196df 6952 return;
52a1dd4d
AD
6953 case e1000_82576:
6954 /* notify HW that the MAC is adding vlan tags */
6955 reg = rd32(E1000_DTXCTL);
6956 reg |= E1000_DTXCTL_VLAN_ADDED;
6957 wr32(E1000_DTXCTL, reg);
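		/* fall through - 82576 also gets the VLAN tag stripping
		 * setup below */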
6958 case e1000_82580:
6959 /* enable replication vlan tag stripping */
6960 reg = rd32(E1000_RPLOLR);
6961 reg |= E1000_RPLOLR_STRVLAN;
6962 wr32(E1000_RPLOLR, reg);
d2ba2ed8
AD
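		/* fall through */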
6963 case e1000_i350:
6964 /* none of the above registers are supported by i350 */
52a1dd4d
AD
6965 break;
6966 }
10d8e907 6967
d4960307
AD
6968 if (adapter->vfs_allocated_count) {
6969 igb_vmdq_set_loopback_pf(hw, true);
6970 igb_vmdq_set_replication_pf(hw, true);
13800469
GR
6971 igb_vmdq_set_anti_spoofing_pf(hw, true,
6972 adapter->vfs_allocated_count);
d4960307
AD
6973 } else {
6974 igb_vmdq_set_loopback_pf(hw, false);
6975 igb_vmdq_set_replication_pf(hw, false);
6976 }
4ae196df
AD
6977}
6978
9d5c8243 6979/* igb_main.c */