/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>

/* Intel Media SOC GbE MDIO physical base address */
static unsigned long ce4100_gbe_mdio_base_phy;
/* Intel Media SOC GbE MDIO virtual base address */
void __iomem *ce4100_gbe_mdio_base_virt;

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif

#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
		pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

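	/* writing all 1s to the Interrupt Mask Clear register masks every
	 * interrupt source; the flush and synchronize_irq() below ensure no
	 * handler is still running when we return */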
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

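/**
 * e1000_update_mng_vlan - keep the manageability VLAN id registered
 * @adapter: board private structure
 *
 * Registers the VLAN id reported in the management cookie when it is not
 * already present, remembers it as mng_vlan_id, and drops the previously
 * tracked id once it is no longer referenced.
 **/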
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (hw->mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
				e1000_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;

			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				e1000_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter = private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 *
 **/

void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		mdelay(1);
	}
out:
	return;
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;


	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * timers and tasks from rescheduling.
	 */
	set_bit(__E1000_DOWN, &adapter->flags);

	del_timer_sync(&adapter->tx_fifo_stall_timer);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	rtnl_lock();
	e1000_down(adapter);
	e1000_up(adapter);
	rtnl_unlock();
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/**
 * Dump the eeprom for users having checksum issues
 **/
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data) {
		pr_err("Unable to allocate memory to dump EEPROM data\n");
		return;
	}

	ops->get_eeprom(netdev, &eeprom, data);

	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

81250297
TI
764/**
765 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
766 * @pdev: PCI device information struct
767 *
768 * Return true if an adapter needs ioport resources
769 **/
770static int e1000_is_need_ioport(struct pci_dev *pdev)
771{
772 switch (pdev->device) {
773 case E1000_DEV_ID_82540EM:
774 case E1000_DEV_ID_82540EM_LOM:
775 case E1000_DEV_ID_82540EP:
776 case E1000_DEV_ID_82540EP_LOM:
777 case E1000_DEV_ID_82540EP_LP:
778 case E1000_DEV_ID_82541EI:
779 case E1000_DEV_ID_82541EI_MOBILE:
780 case E1000_DEV_ID_82541ER:
781 case E1000_DEV_ID_82541ER_LOM:
782 case E1000_DEV_ID_82541GI:
783 case E1000_DEV_ID_82541GI_LF:
784 case E1000_DEV_ID_82541GI_MOBILE:
785 case E1000_DEV_ID_82544EI_COPPER:
786 case E1000_DEV_ID_82544EI_FIBER:
787 case E1000_DEV_ID_82544GC_COPPER:
788 case E1000_DEV_ID_82544GC_LOM:
789 case E1000_DEV_ID_82545EM_COPPER:
790 case E1000_DEV_ID_82545EM_FIBER:
791 case E1000_DEV_ID_82546EB_COPPER:
792 case E1000_DEV_ID_82546EB_FIBER:
793 case E1000_DEV_ID_82546EB_QUAD_COPPER:
794 return true;
795 default:
796 return false;
797 }
798}
799
0e7614bc
SH
800static const struct net_device_ops e1000_netdev_ops = {
801 .ndo_open = e1000_open,
802 .ndo_stop = e1000_close,
00829823 803 .ndo_start_xmit = e1000_xmit_frame,
0e7614bc
SH
804 .ndo_get_stats = e1000_get_stats,
805 .ndo_set_rx_mode = e1000_set_rx_mode,
806 .ndo_set_mac_address = e1000_set_mac,
807 .ndo_tx_timeout = e1000_tx_timeout,
808 .ndo_change_mtu = e1000_change_mtu,
809 .ndo_do_ioctl = e1000_ioctl,
810 .ndo_validate_addr = eth_validate_addr,
811
812 .ndo_vlan_rx_register = e1000_vlan_rx_register,
813 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
814 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
815#ifdef CONFIG_NET_POLL_CONTROLLER
816 .ndo_poll_controller = e1000_netpoll,
817#endif
818};
819
e508be17
JB
820/**
821 * e1000_init_hw_struct - initialize members of hw struct
822 * @adapter: board private struct
823 * @hw: structure used by e1000_hw.c
824 *
825 * Factors out initialization of the e1000_hw struct to its own function
826 * that can be called very early at init (just after struct allocation).
827 * Fields are initialized based on PCI device information and
828 * OS network device settings (MTU size).
829 * Returns negative error codes if MAC type setup fails.
830 */
831static int e1000_init_hw_struct(struct e1000_adapter *adapter,
832 struct e1000_hw *hw)
833{
834 struct pci_dev *pdev = adapter->pdev;
835
836 /* PCI config space info */
837 hw->vendor_id = pdev->vendor;
838 hw->device_id = pdev->device;
839 hw->subsystem_vendor_id = pdev->subsystem_vendor;
840 hw->subsystem_id = pdev->subsystem_device;
841 hw->revision_id = pdev->revision;
842
843 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
844
845 hw->max_frame_size = adapter->netdev->mtu +
846 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
847 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
848
849 /* identify the MAC */
850 if (e1000_set_mac_type(hw)) {
851 e_err(probe, "Unknown MAC Type\n");
852 return -EIO;
853 }
854
855 switch (hw->mac_type) {
856 default:
857 break;
858 case e1000_82541:
859 case e1000_82547:
860 case e1000_82541_rev_2:
861 case e1000_82547_rev_2:
862 hw->phy_init_script = 1;
863 break;
864 }
865
866 e1000_set_media_type(hw);
867 e1000_get_bus_info(hw);
868
869 hw->wait_autoneg_complete = false;
870 hw->tbi_compatibility_en = true;
871 hw->adaptive_ifs = true;
872
873 /* Copper options */
874
875 if (hw->media_type == e1000_media_type_copper) {
876 hw->mdix = AUTO_ALL_MODES;
877 hw->disable_polarity_correction = false;
878 hw->master_slave = E1000_MASTER_SLAVE;
879 }
880
881 return 0;
882}
883
1da177e4
LT
884/**
885 * e1000_probe - Device Initialization Routine
886 * @pdev: PCI device information struct
887 * @ent: entry in e1000_pci_tbl
888 *
889 * Returns 0 on success, negative on failure
890 *
891 * e1000_probe initializes an adapter identified by a pci_dev structure.
892 * The OS initialization, configuring of the adapter private structure,
893 * and a hardware reset occur.
894 **/
1dc32918
JP
895static int __devinit e1000_probe(struct pci_dev *pdev,
896 const struct pci_device_id *ent)
1da177e4
LT
897{
898 struct net_device *netdev;
899 struct e1000_adapter *adapter;
1dc32918 900 struct e1000_hw *hw;
2d7edb92 901
1da177e4 902 static int cards_found = 0;
120cd576 903 static int global_quad_port_a = 0; /* global ksp3 port a indication */
2d7edb92 904 int i, err, pci_using_dac;
406874a7 905 u16 eeprom_data = 0;
5377a416 906 u16 tmp = 0;
406874a7 907 u16 eeprom_apme_mask = E1000_EEPROM_APME;
81250297 908 int bars, need_ioport;
0795af57 909
81250297
TI
910 /* do not allocate ioport bars when not needed */
911 need_ioport = e1000_is_need_ioport(pdev);
912 if (need_ioport) {
913 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
914 err = pci_enable_device(pdev);
915 } else {
916 bars = pci_select_bars(pdev, IORESOURCE_MEM);
4d7155b9 917 err = pci_enable_device_mem(pdev);
81250297 918 }
c7be73bc 919 if (err)
1da177e4
LT
920 return err;
921
81250297 922 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
c7be73bc 923 if (err)
6dd62ab0 924 goto err_pci_reg;
1da177e4
LT
925
926 pci_set_master(pdev);
dbb5aaeb
NN
927 err = pci_save_state(pdev);
928 if (err)
929 goto err_alloc_etherdev;
1da177e4 930
6dd62ab0 931 err = -ENOMEM;
1da177e4 932 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
6dd62ab0 933 if (!netdev)
1da177e4 934 goto err_alloc_etherdev;
1da177e4 935
1da177e4
LT
936 SET_NETDEV_DEV(netdev, &pdev->dev);
937
938 pci_set_drvdata(pdev, netdev);
60490fe0 939 adapter = netdev_priv(netdev);
1da177e4
LT
940 adapter->netdev = netdev;
941 adapter->pdev = pdev;
1da177e4 942 adapter->msg_enable = (1 << debug) - 1;
81250297
TI
943 adapter->bars = bars;
944 adapter->need_ioport = need_ioport;
1da177e4 945
1dc32918
JP
946 hw = &adapter->hw;
947 hw->back = adapter;
948
6dd62ab0 949 err = -EIO;
275f165f 950 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
1dc32918 951 if (!hw->hw_addr)
1da177e4 952 goto err_ioremap;
1da177e4 953
81250297
TI
954 if (adapter->need_ioport) {
955 for (i = BAR_1; i <= BAR_5; i++) {
956 if (pci_resource_len(pdev, i) == 0)
957 continue;
958 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
959 hw->io_base = pci_resource_start(pdev, i);
960 break;
961 }
1da177e4
LT
962 }
963 }
964
e508be17
JB
965 /* make ready for any if (hw->...) below */
966 err = e1000_init_hw_struct(adapter, hw);
967 if (err)
968 goto err_sw_init;
969
970 /*
971 * there is a workaround being applied below that limits
972 * 64-bit DMA addresses to 64-bit hardware. There are some
973 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
974 */
975 pci_using_dac = 0;
976 if ((hw->bus_type == e1000_bus_type_pcix) &&
977 !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
978 /*
979 * according to DMA-API-HOWTO, coherent calls will always
980 * succeed if the set call did
981 */
982 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
983 pci_using_dac = 1;
e508be17 984 } else {
19a0b67a
DN
985 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
986 if (err) {
987 pr_err("No usable DMA config, aborting\n");
988 goto err_dma;
989 }
990 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
e508be17
JB
991 }
992
0e7614bc 993 netdev->netdev_ops = &e1000_netdev_ops;
1da177e4 994 e1000_set_ethtool_ops(netdev);
1da177e4 995 netdev->watchdog_timeo = 5 * HZ;
bea3348e 996 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
0e7614bc 997
0eb5a34c 998 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1da177e4 999
1da177e4
LT
1000 adapter->bd_number = cards_found;
1001
1002 /* setup the private structure */
1003
c7be73bc
JP
1004 err = e1000_sw_init(adapter);
1005 if (err)
1da177e4
LT
1006 goto err_sw_init;
1007
6dd62ab0 1008 err = -EIO;
5377a416
DB
1009 if (hw->mac_type == e1000_ce4100) {
1010 ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
1011 ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
1012 pci_resource_len(pdev, BAR_1));
1013
1014 if (!ce4100_gbe_mdio_base_virt)
1015 goto err_mdio_ioremap;
1016 }
2d7edb92 1017
1dc32918 1018 if (hw->mac_type >= e1000_82543) {
1da177e4
LT
1019 netdev->features = NETIF_F_SG |
1020 NETIF_F_HW_CSUM |
1021 NETIF_F_HW_VLAN_TX |
1022 NETIF_F_HW_VLAN_RX |
1023 NETIF_F_HW_VLAN_FILTER;
1024 }
1025
1dc32918
JP
1026 if ((hw->mac_type >= e1000_82544) &&
1027 (hw->mac_type != e1000_82547))
1da177e4 1028 netdev->features |= NETIF_F_TSO;
2d7edb92 1029
7b872a55 1030 if (pci_using_dac) {
1da177e4 1031 netdev->features |= NETIF_F_HIGHDMA;
7b872a55
YZ
1032 netdev->vlan_features |= NETIF_F_HIGHDMA;
1033 }
1da177e4 1034
20501a69 1035 netdev->vlan_features |= NETIF_F_TSO;
20501a69
PM
1036 netdev->vlan_features |= NETIF_F_HW_CSUM;
1037 netdev->vlan_features |= NETIF_F_SG;
1038
1dc32918 1039 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
2d7edb92 1040
cd94dd0b 1041 /* initialize eeprom parameters */
1dc32918 1042 if (e1000_init_eeprom_params(hw)) {
feb8f478 1043 e_err(probe, "EEPROM initialization failed\n");
6dd62ab0 1044 goto err_eeprom;
cd94dd0b
AK
1045 }
1046
96838a40 1047 /* before reading the EEPROM, reset the controller to
1da177e4 1048 * put the device in a known good starting state */
96838a40 1049
1dc32918 1050 e1000_reset_hw(hw);
1da177e4
LT
1051
1052 /* make sure the EEPROM is good */
1dc32918 1053 if (e1000_validate_eeprom_checksum(hw) < 0) {
feb8f478 1054 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
67b3c27c
AK
1055 e1000_dump_eeprom(adapter);
1056 /*
1057 * set MAC address to all zeroes to invalidate and temporary
1058 * disable this device for the user. This blocks regular
1059 * traffic while still permitting ethtool ioctls from reaching
1060 * the hardware as well as allowing the user to run the
1061 * interface after manually setting a hw addr using
1062 * `ip set address`
1063 */
1dc32918 1064 memset(hw->mac_addr, 0, netdev->addr_len);
67b3c27c
AK
1065 } else {
1066 /* copy the MAC address out of the EEPROM */
1dc32918 1067 if (e1000_read_mac_addr(hw))
feb8f478 1068 e_err(probe, "EEPROM Read Error\n");
1da177e4 1069 }
67b3c27c 1070 /* don't block initalization here due to bad MAC address */
1dc32918
JP
1071 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1072 memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
1da177e4 1073
67b3c27c 1074 if (!is_valid_ether_addr(netdev->perm_addr))
feb8f478 1075 e_err(probe, "Invalid MAC Address\n");
1da177e4 1076
1da177e4 1077 init_timer(&adapter->tx_fifo_stall_timer);
c061b18d 1078 adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
e982f17c 1079 adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
1da177e4
LT
1080
1081 init_timer(&adapter->watchdog_timer);
c061b18d 1082 adapter->watchdog_timer.function = e1000_watchdog;
1da177e4
LT
1083 adapter->watchdog_timer.data = (unsigned long) adapter;
1084
1da177e4 1085 init_timer(&adapter->phy_info_timer);
c061b18d 1086 adapter->phy_info_timer.function = e1000_update_phy_info;
e982f17c 1087 adapter->phy_info_timer.data = (unsigned long)adapter;
1da177e4 1088
5cf42fcd 1089 INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task);
65f27f38 1090 INIT_WORK(&adapter->reset_task, e1000_reset_task);
5cf42fcd 1091 INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1da177e4 1092
1da177e4
LT
1093 e1000_check_options(adapter);
1094
1095 /* Initial Wake on LAN setting
1096 * If APM wake is enabled in the EEPROM,
1097 * enable the ACPI Magic Packet filter
1098 */
1099
1dc32918 1100 switch (hw->mac_type) {
1da177e4
LT
1101 case e1000_82542_rev2_0:
1102 case e1000_82542_rev2_1:
1103 case e1000_82543:
1104 break;
1105 case e1000_82544:
1dc32918 1106 e1000_read_eeprom(hw,
1da177e4
LT
1107 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1108 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1109 break;
1110 case e1000_82546:
1111 case e1000_82546_rev_3:
1dc32918
JP
1112 if (er32(STATUS) & E1000_STATUS_FUNC_1){
1113 e1000_read_eeprom(hw,
1da177e4
LT
1114 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1115 break;
1116 }
1117 /* Fall Through */
1118 default:
1dc32918 1119 e1000_read_eeprom(hw,
1da177e4
LT
1120 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1121 break;
1122 }
96838a40 1123 if (eeprom_data & eeprom_apme_mask)
120cd576
JB
1124 adapter->eeprom_wol |= E1000_WUFC_MAG;
1125
1126 /* now that we have the eeprom settings, apply the special cases
1127 * where the eeprom may be wrong or the board simply won't support
1128 * wake on lan on a particular port */
1129 switch (pdev->device) {
1130 case E1000_DEV_ID_82546GB_PCIE:
1131 adapter->eeprom_wol = 0;
1132 break;
1133 case E1000_DEV_ID_82546EB_FIBER:
1134 case E1000_DEV_ID_82546GB_FIBER:
120cd576
JB
1135 /* Wake events only supported on port A for dual fiber
1136 * regardless of eeprom setting */
1dc32918 1137 if (er32(STATUS) & E1000_STATUS_FUNC_1)
120cd576
JB
1138 adapter->eeprom_wol = 0;
1139 break;
1140 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1141 /* if quad port adapter, disable WoL on all but port A */
1142 if (global_quad_port_a != 0)
1143 adapter->eeprom_wol = 0;
1144 else
1145 adapter->quad_port_a = 1;
1146 /* Reset for multiple quad port adapters */
1147 if (++global_quad_port_a == 4)
1148 global_quad_port_a = 0;
1149 break;
1150 }
1151
1152 /* initialize the wol settings based on the eeprom settings */
1153 adapter->wol = adapter->eeprom_wol;
de126489 1154 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1da177e4 1155
5377a416
DB
1156 /* Auto detect PHY address */
1157 if (hw->mac_type == e1000_ce4100) {
1158 for (i = 0; i < 32; i++) {
1159 hw->phy_addr = i;
1160 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1161 if (tmp == 0 || tmp == 0xFF) {
1162 if (i == 31)
1163 goto err_eeprom;
1164 continue;
1165 } else
1166 break;
1167 }
1168 }
1169
675ad473
ET
1170 /* reset the hardware with the new settings */
1171 e1000_reset(adapter);
1172
1173 strcpy(netdev->name, "eth%d");
1174 err = register_netdev(netdev);
1175 if (err)
1176 goto err_register;
1177
fb3d47d4 1178 /* print bus type/speed/width info */
feb8f478 1179 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
7837e58c
JP
1180 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1181 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1182 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1183 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1184 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1185 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1186 netdev->dev_addr);
1314bbf3 1187
eb62efd2
JB
1188 /* carrier off reporting is important to ethtool even BEFORE open */
1189 netif_carrier_off(netdev);
1190
feb8f478 1191 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1da177e4
LT
1192
1193 cards_found++;
1194 return 0;
1195
1196err_register:
6dd62ab0 1197err_eeprom:
1532ecea 1198 e1000_phy_hw_reset(hw);
6dd62ab0 1199
1dc32918
JP
1200 if (hw->flash_address)
1201 iounmap(hw->flash_address);
6dd62ab0
VA
1202 kfree(adapter->tx_ring);
1203 kfree(adapter->rx_ring);
e508be17 1204err_dma:
1da177e4 1205err_sw_init:
5377a416
DB
1206err_mdio_ioremap:
1207 iounmap(ce4100_gbe_mdio_base_virt);
1dc32918 1208 iounmap(hw->hw_addr);
1da177e4
LT
1209err_ioremap:
1210 free_netdev(netdev);
1211err_alloc_etherdev:
81250297 1212 pci_release_selected_regions(pdev, bars);
6dd62ab0 1213err_pci_reg:
6dd62ab0 1214 pci_disable_device(pdev);
1da177e4
LT
1215 return err;
1216}
1217
1218/**
1219 * e1000_remove - Device Removal Routine
1220 * @pdev: PCI device information struct
1221 *
1222 * e1000_remove is called by the PCI subsystem to alert the driver
1223 * that it should release a PCI device. The could be caused by a
1224 * Hot-Plug event, or because the driver is going to be removed from
1225 * memory.
1226 **/
1227
64798845 1228static void __devexit e1000_remove(struct pci_dev *pdev)
1da177e4
LT
1229{
1230 struct net_device *netdev = pci_get_drvdata(pdev);
60490fe0 1231 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 1232 struct e1000_hw *hw = &adapter->hw;
1da177e4 1233
baa34745
JB
1234 set_bit(__E1000_DOWN, &adapter->flags);
1235 del_timer_sync(&adapter->tx_fifo_stall_timer);
1236 del_timer_sync(&adapter->watchdog_timer);
1237 del_timer_sync(&adapter->phy_info_timer);
1238
28e53bdd 1239 cancel_work_sync(&adapter->reset_task);
be2b28ed 1240
0fccd0e9 1241 e1000_release_manageability(adapter);
1da177e4 1242
bea3348e
SH
1243 unregister_netdev(netdev);
1244
1532ecea 1245 e1000_phy_hw_reset(hw);
1da177e4 1246
24025e4e
MC
1247 kfree(adapter->tx_ring);
1248 kfree(adapter->rx_ring);
24025e4e 1249
1dc32918
JP
1250 iounmap(hw->hw_addr);
1251 if (hw->flash_address)
1252 iounmap(hw->flash_address);
81250297 1253 pci_release_selected_regions(pdev, adapter->bars);
1da177e4
LT
1254
1255 free_netdev(netdev);
1256
1257 pci_disable_device(pdev);
1258}
1259
1260/**
1261 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1262 * @adapter: board private structure to initialize
1263 *
1264 * e1000_sw_init initializes the Adapter private data structure.
e508be17 1265 * e1000_init_hw_struct MUST be called before this function
1da177e4
LT
1266 **/
1267
64798845 1268static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1da177e4 1269{
eb0f8054 1270 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1da177e4 1271
f56799ea
JK
1272 adapter->num_tx_queues = 1;
1273 adapter->num_rx_queues = 1;
581d708e
MC
1274
1275 if (e1000_alloc_queues(adapter)) {
feb8f478 1276 e_err(probe, "Unable to allocate memory for queues\n");
581d708e
MC
1277 return -ENOMEM;
1278 }
1279
47313054 1280 /* Explicitly disable IRQ since the NIC can be in any state. */
47313054
HX
1281 e1000_irq_disable(adapter);
1282
1da177e4 1283 spin_lock_init(&adapter->stats_lock);
1da177e4 1284
1314bbf3
AK
1285 set_bit(__E1000_DOWN, &adapter->flags);
1286
1da177e4
LT
1287 return 0;
1288}
1289
581d708e
MC
1290/**
1291 * e1000_alloc_queues - Allocate memory for all rings
1292 * @adapter: board private structure to initialize
1293 *
1294 * We allocate one ring per queue at run-time since we don't know the
3e1d7cd2 1295 * number of queues at compile-time.
581d708e
MC
1296 **/
1297
64798845 1298static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
581d708e 1299{
1c7e5b12
YB
1300 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1301 sizeof(struct e1000_tx_ring), GFP_KERNEL);
581d708e
MC
1302 if (!adapter->tx_ring)
1303 return -ENOMEM;
581d708e 1304
1c7e5b12
YB
1305 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1306 sizeof(struct e1000_rx_ring), GFP_KERNEL);
581d708e
MC
1307 if (!adapter->rx_ring) {
1308 kfree(adapter->tx_ring);
1309 return -ENOMEM;
1310 }
581d708e 1311
581d708e
MC
1312 return E1000_SUCCESS;
1313}
1314
1da177e4
LT
1315/**
1316 * e1000_open - Called when a network interface is made active
1317 * @netdev: network interface device structure
1318 *
1319 * Returns 0 on success, negative value on failure
1320 *
1321 * The open entry point is called when a network interface is made
1322 * active by the system (IFF_UP). At this point all resources needed
1323 * for transmit and receive operations are allocated, the interrupt
1324 * handler is registered with the OS, the watchdog timer is started,
1325 * and the stack is notified that the interface is ready.
1326 **/
1327
64798845 1328static int e1000_open(struct net_device *netdev)
1da177e4 1329{
60490fe0 1330 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 1331 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
1332 int err;
1333
2db10a08 1334 /* disallow open during test */
1314bbf3 1335 if (test_bit(__E1000_TESTING, &adapter->flags))
2db10a08
AK
1336 return -EBUSY;
1337
eb62efd2
JB
1338 netif_carrier_off(netdev);
1339
1da177e4 1340 /* allocate transmit descriptors */
e0aac5a2
AK
1341 err = e1000_setup_all_tx_resources(adapter);
1342 if (err)
1da177e4
LT
1343 goto err_setup_tx;
1344
1345 /* allocate receive descriptors */
e0aac5a2 1346 err = e1000_setup_all_rx_resources(adapter);
b5bf28cd 1347 if (err)
e0aac5a2 1348 goto err_setup_rx;
b5bf28cd 1349
79f05bf0
AK
1350 e1000_power_up_phy(adapter);
1351
2d7edb92 1352 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1dc32918 1353 if ((hw->mng_cookie.status &
2d7edb92
MC
1354 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1355 e1000_update_mng_vlan(adapter);
1356 }
1da177e4 1357
e0aac5a2
AK
1358 /* before we allocate an interrupt, we must be ready to handle it.
1359 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1360 * as soon as we call pci_request_irq, so we have to setup our
1361 * clean_rx handler before we do so. */
1362 e1000_configure(adapter);
1363
1364 err = e1000_request_irq(adapter);
1365 if (err)
1366 goto err_req_irq;
1367
1368 /* From here on the code is the same as e1000_up() */
1369 clear_bit(__E1000_DOWN, &adapter->flags);
1370
bea3348e 1371 napi_enable(&adapter->napi);
47313054 1372
e0aac5a2
AK
1373 e1000_irq_enable(adapter);
1374
076152d5
BH
1375 netif_start_queue(netdev);
1376
e0aac5a2 1377 /* fire a link status change interrupt to start the watchdog */
1dc32918 1378 ew32(ICS, E1000_ICS_LSC);
e0aac5a2 1379
1da177e4
LT
1380 return E1000_SUCCESS;
1381
b5bf28cd 1382err_req_irq:
e0aac5a2 1383 e1000_power_down_phy(adapter);
581d708e 1384 e1000_free_all_rx_resources(adapter);
1da177e4 1385err_setup_rx:
581d708e 1386 e1000_free_all_tx_resources(adapter);
1da177e4
LT
1387err_setup_tx:
1388 e1000_reset(adapter);
1389
1390 return err;
1391}
1392
1393/**
1394 * e1000_close - Disables a network interface
1395 * @netdev: network interface device structure
1396 *
1397 * Returns 0, this is not allowed to fail
1398 *
1399 * The close entry point is called when an interface is de-activated
1400 * by the OS. The hardware is still under the drivers control, but
1401 * needs to be disabled. A global MAC reset is issued to stop the
1402 * hardware, and all transmit and receive resources are freed.
1403 **/
1404
64798845 1405static int e1000_close(struct net_device *netdev)
1da177e4 1406{
60490fe0 1407 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 1408 struct e1000_hw *hw = &adapter->hw;
1da177e4 1409
2db10a08 1410 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1da177e4 1411 e1000_down(adapter);
79f05bf0 1412 e1000_power_down_phy(adapter);
2db10a08 1413 e1000_free_irq(adapter);
1da177e4 1414
581d708e
MC
1415 e1000_free_all_tx_resources(adapter);
1416 e1000_free_all_rx_resources(adapter);
1da177e4 1417
4666560a
BA
1418 /* kill manageability vlan ID if supported, but not if a vlan with
1419 * the same ID is registered on the host OS (let 8021q kill it) */
1dc32918 1420 if ((hw->mng_cookie.status &
4666560a
BA
1421 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1422 !(adapter->vlgrp &&
5c15bdec 1423 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
2d7edb92
MC
1424 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1425 }
b55ccb35 1426
1da177e4
LT
1427 return 0;
1428}
1429
1430/**
1431 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1432 * @adapter: address of board private structure
2d7edb92
MC
1433 * @start: address of beginning of memory
1434 * @len: length of memory
1da177e4 1435 **/
64798845
JP
1436static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1437 unsigned long len)
1da177e4 1438{
1dc32918 1439 struct e1000_hw *hw = &adapter->hw;
e982f17c 1440 unsigned long begin = (unsigned long)start;
1da177e4
LT
1441 unsigned long end = begin + len;
1442
2648345f
MC
1443 /* First rev 82545 and 82546 need to not allow any memory
1444 * write location to cross 64k boundary due to errata 23 */
1dc32918 1445 if (hw->mac_type == e1000_82545 ||
5377a416 1446 hw->mac_type == e1000_ce4100 ||
1dc32918 1447 hw->mac_type == e1000_82546) {
c3033b01 1448 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1da177e4
LT
1449 }
1450
c3033b01 1451 return true;
1da177e4
LT
1452}
1453
1454/**
1455 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1456 * @adapter: board private structure
581d708e 1457 * @txdr: tx descriptor ring (for a specific queue) to setup
1da177e4
LT
1458 *
1459 * Return 0 on success, negative on failure
1460 **/
1461
64798845
JP
1462static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1463 struct e1000_tx_ring *txdr)
1da177e4 1464{
1da177e4
LT
1465 struct pci_dev *pdev = adapter->pdev;
1466 int size;
1467
1468 size = sizeof(struct e1000_buffer) * txdr->count;
89bf67f1 1469 txdr->buffer_info = vzalloc(size);
96838a40 1470 if (!txdr->buffer_info) {
feb8f478
ET
1471 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1472 "ring\n");
1da177e4
LT
1473 return -ENOMEM;
1474 }
1da177e4
LT
1475
1476 /* round up to nearest 4K */
1477
1478 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
9099cfb9 1479 txdr->size = ALIGN(txdr->size, 4096);
1da177e4 1480
b16f53be
NN
1481 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1482 GFP_KERNEL);
96838a40 1483 if (!txdr->desc) {
1da177e4 1484setup_tx_desc_die:
1da177e4 1485 vfree(txdr->buffer_info);
feb8f478
ET
1486 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1487 "ring\n");
1da177e4
LT
1488 return -ENOMEM;
1489 }
1490
2648345f 1491 /* Fix for errata 23, can't cross 64kB boundary */
1da177e4
LT
1492 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1493 void *olddesc = txdr->desc;
1494 dma_addr_t olddma = txdr->dma;
feb8f478 1495 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
675ad473 1496 txdr->size, txdr->desc);
2648345f 1497 /* Try again, without freeing the previous */
b16f53be
NN
1498 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1499 &txdr->dma, GFP_KERNEL);
2648345f 1500 /* Failed allocation, critical failure */
96838a40 1501 if (!txdr->desc) {
b16f53be
NN
1502 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1503 olddma);
1da177e4
LT
1504 goto setup_tx_desc_die;
1505 }
1506
1507 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1508 /* give up */
b16f53be
NN
1509 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1510 txdr->dma);
1511 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1512 olddma);
feb8f478 1513 e_err(probe, "Unable to allocate aligned memory "
675ad473 1514 "for the transmit descriptor ring\n");
1da177e4
LT
1515 vfree(txdr->buffer_info);
1516 return -ENOMEM;
1517 } else {
2648345f 1518 /* Free old allocation, new allocation was successful */
b16f53be
NN
1519 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1520 olddma);
1da177e4
LT
1521 }
1522 }
1523 memset(txdr->desc, 0, txdr->size);
1524
1525 txdr->next_to_use = 0;
1526 txdr->next_to_clean = 0;
1527
1528 return 0;
1529}
1530
581d708e
MC
1531/**
1532 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1533 * (Descriptors) for all queues
1534 * @adapter: board private structure
1535 *
581d708e
MC
1536 * Return 0 on success, negative on failure
1537 **/
1538
64798845 1539int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
581d708e
MC
1540{
1541 int i, err = 0;
1542
f56799ea 1543 for (i = 0; i < adapter->num_tx_queues; i++) {
581d708e
MC
1544 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1545 if (err) {
feb8f478 1546 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
3fbbc72e
VA
1547 for (i-- ; i >= 0; i--)
1548 e1000_free_tx_resources(adapter,
1549 &adapter->tx_ring[i]);
581d708e
MC
1550 break;
1551 }
1552 }
1553
1554 return err;
1555}
1556
1da177e4
LT
1557/**
1558 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1559 * @adapter: board private structure
1560 *
1561 * Configure the Tx unit of the MAC after a reset.
1562 **/
1563
64798845 1564static void e1000_configure_tx(struct e1000_adapter *adapter)
1da177e4 1565{
406874a7 1566 u64 tdba;
581d708e 1567 struct e1000_hw *hw = &adapter->hw;
1532ecea 1568 u32 tdlen, tctl, tipg;
406874a7 1569 u32 ipgr1, ipgr2;
1da177e4
LT
1570
1571 /* Setup the HW Tx Head and Tail descriptor pointers */
1572
f56799ea 1573 switch (adapter->num_tx_queues) {
24025e4e
MC
1574 case 1:
1575 default:
581d708e
MC
1576 tdba = adapter->tx_ring[0].dma;
1577 tdlen = adapter->tx_ring[0].count *
1578 sizeof(struct e1000_tx_desc);
1dc32918
JP
1579 ew32(TDLEN, tdlen);
1580 ew32(TDBAH, (tdba >> 32));
1581 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1582 ew32(TDT, 0);
1583 ew32(TDH, 0);
6a951698
AK
1584 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1585 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
24025e4e
MC
1586 break;
1587 }
1da177e4
LT
1588
1589 /* Set the default values for the Tx Inter Packet Gap timer */
1532ecea 1590 if ((hw->media_type == e1000_media_type_fiber ||
d89b6c67 1591 hw->media_type == e1000_media_type_internal_serdes))
0fadb059
JK
1592 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1593 else
1594 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1595
581d708e 1596 switch (hw->mac_type) {
1da177e4
LT
1597 case e1000_82542_rev2_0:
1598 case e1000_82542_rev2_1:
1599 tipg = DEFAULT_82542_TIPG_IPGT;
0fadb059
JK
1600 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1601 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1da177e4
LT
1602 break;
1603 default:
0fadb059
JK
1604 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1605 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1606 break;
1da177e4 1607 }
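 /* IPGT, IPGR1 and IPGR2 are packed into the single TIPG register
  * via the shifts below */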
0fadb059
JK
1608 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1609 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1dc32918 1610 ew32(TIPG, tipg);
1da177e4
LT
1611
1612 /* Set the Tx Interrupt Delay register */
1613
1dc32918 1614 ew32(TIDV, adapter->tx_int_delay);
581d708e 1615 if (hw->mac_type >= e1000_82540)
1dc32918 1616 ew32(TADV, adapter->tx_abs_int_delay);
1da177e4
LT
1617
1618 /* Program the Transmit Control Register */
1619
1dc32918 1620 tctl = er32(TCTL);
1da177e4 1621 tctl &= ~E1000_TCTL_CT;
7e6c9861 1622 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1da177e4
LT
1623 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1624
581d708e 1625 e1000_config_collision_dist(hw);
1da177e4
LT
1626
1627 /* Setup Transmit Descriptor Settings for eop descriptor */
6a042dab
JB
1628 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1629
1630 /* only set IDE if we are delaying interrupts using the timers */
1631 if (adapter->tx_int_delay)
1632 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1da177e4 1633
581d708e 1634 if (hw->mac_type < e1000_82543)
1da177e4
LT
1635 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1636 else
1637 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1638
 1639 /* Cache whether we're an 82544 running in PCI-X, because we'll
 1640 * need this to apply a workaround later in the send path. */
581d708e
MC
1641 if (hw->mac_type == e1000_82544 &&
1642 hw->bus_type == e1000_bus_type_pcix)
1da177e4 1643 adapter->pcix_82544 = 1;
7e6c9861 1644
1dc32918 1645 ew32(TCTL, tctl);
7e6c9861 1646
1da177e4
LT
1647}
1648
1649/**
1650 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1651 * @adapter: board private structure
581d708e 1652 * @rxdr: rx descriptor ring (for a specific queue) to setup
1da177e4
LT
1653 *
1654 * Returns 0 on success, negative on failure
1655 **/
1656
64798845
JP
1657static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1658 struct e1000_rx_ring *rxdr)
1da177e4 1659{
1da177e4 1660 struct pci_dev *pdev = adapter->pdev;
2d7edb92 1661 int size, desc_len;
1da177e4
LT
1662
1663 size = sizeof(struct e1000_buffer) * rxdr->count;
89bf67f1 1664 rxdr->buffer_info = vzalloc(size);
581d708e 1665 if (!rxdr->buffer_info) {
feb8f478
ET
1666 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1667 "ring\n");
1da177e4
LT
1668 return -ENOMEM;
1669 }
1da177e4 1670
1532ecea 1671 desc_len = sizeof(struct e1000_rx_desc);
2d7edb92 1672
1da177e4
LT
1673 /* Round up to nearest 4K */
1674
2d7edb92 1675 rxdr->size = rxdr->count * desc_len;
9099cfb9 1676 rxdr->size = ALIGN(rxdr->size, 4096);
1da177e4 1677
b16f53be
NN
1678 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1679 GFP_KERNEL);
1da177e4 1680
581d708e 1681 if (!rxdr->desc) {
feb8f478
ET
1682 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1683 "ring\n");
1da177e4 1684setup_rx_desc_die:
1da177e4
LT
1685 vfree(rxdr->buffer_info);
1686 return -ENOMEM;
1687 }
1688
2648345f 1689 /* Fix for errata 23, can't cross 64kB boundary */
1da177e4
LT
1690 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1691 void *olddesc = rxdr->desc;
1692 dma_addr_t olddma = rxdr->dma;
feb8f478 1693 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
675ad473 1694 rxdr->size, rxdr->desc);
2648345f 1695 /* Try again, without freeing the previous */
b16f53be
NN
1696 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1697 &rxdr->dma, GFP_KERNEL);
2648345f 1698 /* Failed allocation, critical failure */
581d708e 1699 if (!rxdr->desc) {
b16f53be
NN
1700 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1701 olddma);
feb8f478
ET
1702 e_err(probe, "Unable to allocate memory for the Rx "
1703 "descriptor ring\n");
1da177e4
LT
1704 goto setup_rx_desc_die;
1705 }
1706
1707 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1708 /* give up */
b16f53be
NN
1709 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1710 rxdr->dma);
1711 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1712 olddma);
feb8f478
ET
1713 e_err(probe, "Unable to allocate aligned memory for "
1714 "the Rx descriptor ring\n");
581d708e 1715 goto setup_rx_desc_die;
1da177e4 1716 } else {
2648345f 1717 /* Free old allocation, new allocation was successful */
b16f53be
NN
1718 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1719 olddma);
1da177e4
LT
1720 }
1721 }
1722 memset(rxdr->desc, 0, rxdr->size);
1723
1724 rxdr->next_to_clean = 0;
1725 rxdr->next_to_use = 0;
edbbb3ca 1726 rxdr->rx_skb_top = NULL;
1da177e4
LT
1727
1728 return 0;
1729}
1730
581d708e
MC
1731/**
1732 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1733 * (Descriptors) for all queues
1734 * @adapter: board private structure
1735 *
581d708e
MC
1736 * Return 0 on success, negative on failure
1737 **/
1738
64798845 1739int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
581d708e
MC
1740{
1741 int i, err = 0;
1742
f56799ea 1743 for (i = 0; i < adapter->num_rx_queues; i++) {
581d708e
MC
1744 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1745 if (err) {
feb8f478 1746 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
3fbbc72e
VA
1747 for (i-- ; i >= 0; i--)
1748 e1000_free_rx_resources(adapter,
1749 &adapter->rx_ring[i]);
581d708e
MC
1750 break;
1751 }
1752 }
1753
1754 return err;
1755}
1756
1da177e4 1757/**
2648345f 1758 * e1000_setup_rctl - configure the receive control registers
1da177e4
LT
1759 * @adapter: Board private structure
1760 **/
64798845 1761static void e1000_setup_rctl(struct e1000_adapter *adapter)
1da177e4 1762{
1dc32918 1763 struct e1000_hw *hw = &adapter->hw;
630b25cd 1764 u32 rctl;
1da177e4 1765
1dc32918 1766 rctl = er32(RCTL);
1da177e4
LT
1767
1768 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1769
1770 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1771 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1dc32918 1772 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1da177e4 1773
1dc32918 1774 if (hw->tbi_compatibility_on == 1)
1da177e4
LT
1775 rctl |= E1000_RCTL_SBP;
1776 else
1777 rctl &= ~E1000_RCTL_SBP;
1778
2d7edb92
MC
1779 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1780 rctl &= ~E1000_RCTL_LPE;
1781 else
1782 rctl |= E1000_RCTL_LPE;
1783
1da177e4 1784 /* Setup buffer sizes */
9e2feace
AK
1785 rctl &= ~E1000_RCTL_SZ_4096;
1786 rctl |= E1000_RCTL_BSEX;
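 /* per the 8254x RCTL layout, BSEX scales the BSIZE encoding by 16;
  * it is assumed set for the _SZ_4096/8192/16384 values below and is
  * cleared again for the default 2048-byte buffers */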
1787 switch (adapter->rx_buffer_len) {
a1415ee6
JK
1788 case E1000_RXBUFFER_2048:
1789 default:
1790 rctl |= E1000_RCTL_SZ_2048;
1791 rctl &= ~E1000_RCTL_BSEX;
1792 break;
1793 case E1000_RXBUFFER_4096:
1794 rctl |= E1000_RCTL_SZ_4096;
1795 break;
1796 case E1000_RXBUFFER_8192:
1797 rctl |= E1000_RCTL_SZ_8192;
1798 break;
1799 case E1000_RXBUFFER_16384:
1800 rctl |= E1000_RCTL_SZ_16384;
1801 break;
2d7edb92
MC
1802 }
1803
1dc32918 1804 ew32(RCTL, rctl);
1da177e4
LT
1805}
1806
1807/**
1808 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1809 * @adapter: board private structure
1810 *
1811 * Configure the Rx unit of the MAC after a reset.
1812 **/
1813
64798845 1814static void e1000_configure_rx(struct e1000_adapter *adapter)
1da177e4 1815{
406874a7 1816 u64 rdba;
581d708e 1817 struct e1000_hw *hw = &adapter->hw;
1532ecea 1818 u32 rdlen, rctl, rxcsum;
2d7edb92 1819
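 /* jumbo MTUs take the page-based jumbo receive path below;
  * standard MTUs use single-skb receive buffers */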
edbbb3ca
JB
1820 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1821 rdlen = adapter->rx_ring[0].count *
1822 sizeof(struct e1000_rx_desc);
1823 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1824 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1825 } else {
1826 rdlen = adapter->rx_ring[0].count *
1827 sizeof(struct e1000_rx_desc);
1828 adapter->clean_rx = e1000_clean_rx_irq;
1829 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1830 }
1da177e4
LT
1831
1832 /* disable receives while setting up the descriptors */
1dc32918
JP
1833 rctl = er32(RCTL);
1834 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1da177e4
LT
1835
1836 /* set the Receive Delay Timer Register */
1dc32918 1837 ew32(RDTR, adapter->rx_int_delay);
1da177e4 1838
581d708e 1839 if (hw->mac_type >= e1000_82540) {
1dc32918 1840 ew32(RADV, adapter->rx_abs_int_delay);
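 /* the ITR register counts in 256 ns increments, so
  * 10^9 / (interrupts-per-sec * 256) is the interval programmed below */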
835bb129 1841 if (adapter->itr_setting != 0)
1dc32918 1842 ew32(ITR, 1000000000 / (adapter->itr * 256));
1da177e4
LT
1843 }
1844
581d708e
MC
1845 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1846 * the Base and Length of the Rx Descriptor Ring */
f56799ea 1847 switch (adapter->num_rx_queues) {
24025e4e
MC
1848 case 1:
1849 default:
581d708e 1850 rdba = adapter->rx_ring[0].dma;
1dc32918
JP
1851 ew32(RDLEN, rdlen);
1852 ew32(RDBAH, (rdba >> 32));
1853 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1854 ew32(RDT, 0);
1855 ew32(RDH, 0);
6a951698
AK
1856 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
1857 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
581d708e 1858 break;
24025e4e
MC
1859 }
1860
1da177e4 1861 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
581d708e 1862 if (hw->mac_type >= e1000_82543) {
1dc32918 1863 rxcsum = er32(RXCSUM);
630b25cd 1864 if (adapter->rx_csum)
2d7edb92 1865 rxcsum |= E1000_RXCSUM_TUOFL;
630b25cd 1866 else
2d7edb92 1867 /* don't need to clear IPPCSE as it defaults to 0 */
630b25cd 1868 rxcsum &= ~E1000_RXCSUM_TUOFL;
1dc32918 1869 ew32(RXCSUM, rxcsum);
1da177e4
LT
1870 }
1871
1872 /* Enable Receives */
1dc32918 1873 ew32(RCTL, rctl);
1da177e4
LT
1874}
1875
1876/**
581d708e 1877 * e1000_free_tx_resources - Free Tx Resources per Queue
1da177e4 1878 * @adapter: board private structure
581d708e 1879 * @tx_ring: Tx descriptor ring for a specific queue
1da177e4
LT
1880 *
1881 * Free all transmit software resources
1882 **/
1883
64798845
JP
1884static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1885 struct e1000_tx_ring *tx_ring)
1da177e4
LT
1886{
1887 struct pci_dev *pdev = adapter->pdev;
1888
581d708e 1889 e1000_clean_tx_ring(adapter, tx_ring);
1da177e4 1890
581d708e
MC
1891 vfree(tx_ring->buffer_info);
1892 tx_ring->buffer_info = NULL;
1da177e4 1893
b16f53be
NN
1894 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1895 tx_ring->dma);
1da177e4 1896
581d708e
MC
1897 tx_ring->desc = NULL;
1898}
1899
1900/**
1901 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1902 * @adapter: board private structure
1903 *
1904 * Free all transmit software resources
1905 **/
1906
64798845 1907void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
581d708e
MC
1908{
1909 int i;
1910
f56799ea 1911 for (i = 0; i < adapter->num_tx_queues; i++)
581d708e 1912 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1da177e4
LT
1913}
1914
64798845
JP
1915static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1916 struct e1000_buffer *buffer_info)
1da177e4 1917{
602c0554
AD
1918 if (buffer_info->dma) {
1919 if (buffer_info->mapped_as_page)
b16f53be
NN
1920 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1921 buffer_info->length, DMA_TO_DEVICE);
602c0554 1922 else
b16f53be 1923 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
602c0554 1924 buffer_info->length,
b16f53be 1925 DMA_TO_DEVICE);
602c0554
AD
1926 buffer_info->dma = 0;
1927 }
a9ebadd6 1928 if (buffer_info->skb) {
1da177e4 1929 dev_kfree_skb_any(buffer_info->skb);
a9ebadd6
JB
1930 buffer_info->skb = NULL;
1931 }
37e73df8 1932 buffer_info->time_stamp = 0;
a9ebadd6 1933 /* buffer_info must be completely set up in the transmit path */
1da177e4
LT
1934}
1935
1936/**
1937 * e1000_clean_tx_ring - Free Tx Buffers
1938 * @adapter: board private structure
581d708e 1939 * @tx_ring: ring to be cleaned
1da177e4
LT
1940 **/
1941
64798845
JP
1942static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1943 struct e1000_tx_ring *tx_ring)
1da177e4 1944{
1dc32918 1945 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
1946 struct e1000_buffer *buffer_info;
1947 unsigned long size;
1948 unsigned int i;
1949
1950 /* Free all the Tx ring sk_buffs */
1951
96838a40 1952 for (i = 0; i < tx_ring->count; i++) {
1da177e4
LT
1953 buffer_info = &tx_ring->buffer_info[i];
1954 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1955 }
1956
1957 size = sizeof(struct e1000_buffer) * tx_ring->count;
1958 memset(tx_ring->buffer_info, 0, size);
1959
1960 /* Zero out the descriptor ring */
1961
1962 memset(tx_ring->desc, 0, tx_ring->size);
1963
1964 tx_ring->next_to_use = 0;
1965 tx_ring->next_to_clean = 0;
fd803241 1966 tx_ring->last_tx_tso = 0;
1da177e4 1967
1dc32918
JP
1968 writel(0, hw->hw_addr + tx_ring->tdh);
1969 writel(0, hw->hw_addr + tx_ring->tdt);
581d708e
MC
1970}
1971
1972/**
1973 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
1974 * @adapter: board private structure
1975 **/
1976
64798845 1977static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
581d708e
MC
1978{
1979 int i;
1980
f56799ea 1981 for (i = 0; i < adapter->num_tx_queues; i++)
581d708e 1982 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1da177e4
LT
1983}
1984
1985/**
1986 * e1000_free_rx_resources - Free Rx Resources
1987 * @adapter: board private structure
581d708e 1988 * @rx_ring: ring to clean the resources from
1da177e4
LT
1989 *
1990 * Free all receive software resources
1991 **/
1992
64798845
JP
1993static void e1000_free_rx_resources(struct e1000_adapter *adapter,
1994 struct e1000_rx_ring *rx_ring)
1da177e4 1995{
1da177e4
LT
1996 struct pci_dev *pdev = adapter->pdev;
1997
581d708e 1998 e1000_clean_rx_ring(adapter, rx_ring);
1da177e4
LT
1999
2000 vfree(rx_ring->buffer_info);
2001 rx_ring->buffer_info = NULL;
2002
b16f53be
NN
2003 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2004 rx_ring->dma);
1da177e4
LT
2005
2006 rx_ring->desc = NULL;
2007}
2008
2009/**
581d708e 2010 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
1da177e4 2011 * @adapter: board private structure
581d708e
MC
2012 *
2013 * Free all receive software resources
2014 **/
2015
64798845 2016void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
581d708e
MC
2017{
2018 int i;
2019
f56799ea 2020 for (i = 0; i < adapter->num_rx_queues; i++)
581d708e
MC
2021 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2022}
2023
2024/**
2025 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2026 * @adapter: board private structure
2027 * @rx_ring: ring to free buffers from
1da177e4
LT
2028 **/
2029
64798845
JP
2030static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2031 struct e1000_rx_ring *rx_ring)
1da177e4 2032{
1dc32918 2033 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
2034 struct e1000_buffer *buffer_info;
2035 struct pci_dev *pdev = adapter->pdev;
2036 unsigned long size;
630b25cd 2037 unsigned int i;
1da177e4
LT
2038
2039 /* Free all the Rx ring sk_buffs */
96838a40 2040 for (i = 0; i < rx_ring->count; i++) {
1da177e4 2041 buffer_info = &rx_ring->buffer_info[i];
edbbb3ca
JB
2042 if (buffer_info->dma &&
2043 adapter->clean_rx == e1000_clean_rx_irq) {
b16f53be 2044 dma_unmap_single(&pdev->dev, buffer_info->dma,
edbbb3ca 2045 buffer_info->length,
b16f53be 2046 DMA_FROM_DEVICE);
edbbb3ca
JB
2047 } else if (buffer_info->dma &&
2048 adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
b16f53be
NN
2049 dma_unmap_page(&pdev->dev, buffer_info->dma,
2050 buffer_info->length,
2051 DMA_FROM_DEVICE);
679be3ba 2052 }
1da177e4 2053
679be3ba 2054 buffer_info->dma = 0;
edbbb3ca
JB
2055 if (buffer_info->page) {
2056 put_page(buffer_info->page);
2057 buffer_info->page = NULL;
2058 }
679be3ba 2059 if (buffer_info->skb) {
1da177e4
LT
2060 dev_kfree_skb(buffer_info->skb);
2061 buffer_info->skb = NULL;
997f5cbd 2062 }
1da177e4
LT
2063 }
2064
edbbb3ca
JB
2065 /* there also may be some cached data from a chained receive */
2066 if (rx_ring->rx_skb_top) {
2067 dev_kfree_skb(rx_ring->rx_skb_top);
2068 rx_ring->rx_skb_top = NULL;
2069 }
2070
1da177e4
LT
2071 size = sizeof(struct e1000_buffer) * rx_ring->count;
2072 memset(rx_ring->buffer_info, 0, size);
2073
2074 /* Zero out the descriptor ring */
1da177e4
LT
2075 memset(rx_ring->desc, 0, rx_ring->size);
2076
2077 rx_ring->next_to_clean = 0;
2078 rx_ring->next_to_use = 0;
2079
1dc32918
JP
2080 writel(0, hw->hw_addr + rx_ring->rdh);
2081 writel(0, hw->hw_addr + rx_ring->rdt);
581d708e
MC
2082}
2083
2084/**
2085 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2086 * @adapter: board private structure
2087 **/
2088
64798845 2089static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
581d708e
MC
2090{
2091 int i;
2092
f56799ea 2093 for (i = 0; i < adapter->num_rx_queues; i++)
581d708e 2094 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1da177e4
LT
2095}
2096
2097/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2098 * and memory write and invalidate disabled for certain operations
2099 */
64798845 2100static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
1da177e4 2101{
1dc32918 2102 struct e1000_hw *hw = &adapter->hw;
1da177e4 2103 struct net_device *netdev = adapter->netdev;
406874a7 2104 u32 rctl;
1da177e4 2105
1dc32918 2106 e1000_pci_clear_mwi(hw);
1da177e4 2107
1dc32918 2108 rctl = er32(RCTL);
1da177e4 2109 rctl |= E1000_RCTL_RST;
1dc32918
JP
2110 ew32(RCTL, rctl);
2111 E1000_WRITE_FLUSH();
1da177e4
LT
2112 mdelay(5);
2113
96838a40 2114 if (netif_running(netdev))
581d708e 2115 e1000_clean_all_rx_rings(adapter);
1da177e4
LT
2116}
2117
64798845 2118static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
1da177e4 2119{
1dc32918 2120 struct e1000_hw *hw = &adapter->hw;
1da177e4 2121 struct net_device *netdev = adapter->netdev;
406874a7 2122 u32 rctl;
1da177e4 2123
1dc32918 2124 rctl = er32(RCTL);
1da177e4 2125 rctl &= ~E1000_RCTL_RST;
1dc32918
JP
2126 ew32(RCTL, rctl);
2127 E1000_WRITE_FLUSH();
1da177e4
LT
2128 mdelay(5);
2129
1dc32918
JP
2130 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2131 e1000_pci_set_mwi(hw);
1da177e4 2132
96838a40 2133 if (netif_running(netdev)) {
72d64a43
JK
2134 /* No need to loop, because 82542 supports only 1 queue */
2135 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
7c4d3367 2136 e1000_configure_rx(adapter);
72d64a43 2137 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
1da177e4
LT
2138 }
2139}
2140
2141/**
2142 * e1000_set_mac - Change the Ethernet Address of the NIC
2143 * @netdev: network interface device structure
2144 * @p: pointer to an address structure
2145 *
2146 * Returns 0 on success, negative on failure
2147 **/
2148
64798845 2149static int e1000_set_mac(struct net_device *netdev, void *p)
1da177e4 2150{
60490fe0 2151 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 2152 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
2153 struct sockaddr *addr = p;
2154
96838a40 2155 if (!is_valid_ether_addr(addr->sa_data))
1da177e4
LT
2156 return -EADDRNOTAVAIL;
2157
2158 /* 82542 2.0 needs to be in reset to write receive address registers */
2159
1dc32918 2160 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4
LT
2161 e1000_enter_82542_rst(adapter);
2162
2163 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1dc32918 2164 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
1da177e4 2165
1dc32918 2166 e1000_rar_set(hw, hw->mac_addr, 0);
1da177e4 2167
1dc32918 2168 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4
LT
2169 e1000_leave_82542_rst(adapter);
2170
2171 return 0;
2172}
2173
2174/**
db0ce50d 2175 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
1da177e4
LT
2176 * @netdev: network interface device structure
2177 *
db0ce50d
PM
2178 * The set_rx_mode entry point is called whenever the unicast or multicast
2179 * address lists or the network interface flags are updated. This routine is
2180 * responsible for configuring the hardware for proper unicast, multicast,
1da177e4
LT
2181 * promiscuous mode, and all-multi behavior.
2182 **/
2183
64798845 2184static void e1000_set_rx_mode(struct net_device *netdev)
1da177e4 2185{
60490fe0 2186 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4 2187 struct e1000_hw *hw = &adapter->hw;
ccffad25
JP
2188 struct netdev_hw_addr *ha;
2189 bool use_uc = false;
406874a7
JP
2190 u32 rctl;
2191 u32 hash_value;
868d5309 2192 int i, rar_entries = E1000_RAR_ENTRIES;
1532ecea 2193 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
81c52285
JB
2194 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2195
2196 if (!mcarray) {
feb8f478 2197 e_err(probe, "memory allocation failed\n");
81c52285
JB
2198 return;
2199 }
cd94dd0b 2200
2648345f
MC
2201 /* Check for Promiscuous and All Multicast modes */
2202
1dc32918 2203 rctl = er32(RCTL);
1da177e4 2204
96838a40 2205 if (netdev->flags & IFF_PROMISC) {
1da177e4 2206 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
746b9f02 2207 rctl &= ~E1000_RCTL_VFE;
1da177e4 2208 } else {
1532ecea 2209 if (netdev->flags & IFF_ALLMULTI)
746b9f02 2210 rctl |= E1000_RCTL_MPE;
1532ecea 2211 else
746b9f02 2212 rctl &= ~E1000_RCTL_MPE;
1532ecea
JB
2213 /* Enable VLAN filter if there is a VLAN */
2214 if (adapter->vlgrp)
2215 rctl |= E1000_RCTL_VFE;
db0ce50d
PM
2216 }
2217
32e7bfc4 2218 if (netdev_uc_count(netdev) > rar_entries - 1) {
db0ce50d
PM
2219 rctl |= E1000_RCTL_UPE;
2220 } else if (!(netdev->flags & IFF_PROMISC)) {
2221 rctl &= ~E1000_RCTL_UPE;
ccffad25 2222 use_uc = true;
1da177e4
LT
2223 }
2224
1dc32918 2225 ew32(RCTL, rctl);
1da177e4
LT
2226
2227 /* 82542 2.0 needs to be in reset to write receive address registers */
2228
96838a40 2229 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4
LT
2230 e1000_enter_82542_rst(adapter);
2231
db0ce50d
PM
2232 /* load the first 14 addresses into the exact filters 1-14. Unicast
2233 * addresses take precedence to avoid disabling unicast filtering
2234 * when possible.
2235 *
b595076a 2236 * RAR 0 is used for the station MAC address.
1da177e4
LT
 2237 * If there are fewer than 14 addresses, go ahead and clear the filters
2238 */
ccffad25
JP
2239 i = 1;
2240 if (use_uc)
32e7bfc4 2241 netdev_for_each_uc_addr(ha, netdev) {
ccffad25
JP
2242 if (i == rar_entries)
2243 break;
2244 e1000_rar_set(hw, ha->addr, i++);
2245 }
2246
22bedad3 2247 netdev_for_each_mc_addr(ha, netdev) {
7a81e9f3
JP
2248 if (i == rar_entries) {
2249 /* load any remaining addresses into the hash table */
2250 u32 hash_reg, hash_bit, mta;
22bedad3 2251 hash_value = e1000_hash_mc_addr(hw, ha->addr);
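 /* split the hash: the upper bits select one of the 128 MTA
  * registers, the low five bits select the bit within that
  * 32-bit register */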
7a81e9f3
JP
2252 hash_reg = (hash_value >> 5) & 0x7F;
2253 hash_bit = hash_value & 0x1F;
2254 mta = (1 << hash_bit);
2255 mcarray[hash_reg] |= mta;
10886af5 2256 } else {
22bedad3 2257 e1000_rar_set(hw, ha->addr, i++);
1da177e4
LT
2258 }
2259 }
2260
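 /* each RAR entry is a low/high register pair (hence the i << 1
  * indexing); writing 0 to the high half clears its address-valid bit */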
7a81e9f3
JP
2261 for (; i < rar_entries; i++) {
2262 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2263 E1000_WRITE_FLUSH();
2264 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2265 E1000_WRITE_FLUSH();
1da177e4
LT
2266 }
2267
81c52285
JB
2268 /* write the hash table completely, write from bottom to avoid
 2269 * problems with write-combining chipsets and the need to flush each write */
2270 for (i = mta_reg_count - 1; i >= 0 ; i--) {
2271 /*
 2272 * The 82544 has an erratum where writing odd
 2273 * offsets overwrites the previous even offset; writing
 2274 * backwards over the range works around it by always
 2275 * writing the odd offset first
2276 */
2277 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2278 }
2279 E1000_WRITE_FLUSH();
2280
96838a40 2281 if (hw->mac_type == e1000_82542_rev2_0)
1da177e4 2282 e1000_leave_82542_rst(adapter);
81c52285
JB
2283
2284 kfree(mcarray);
1da177e4
LT
2285}
2286
2287/* Need to wait a few seconds after link up to get diagnostic information from
2288 * the phy */
2289
64798845 2290static void e1000_update_phy_info(unsigned long data)
1da177e4 2291{
e982f17c 2292 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
5cf42fcd
JB
2293 schedule_work(&adapter->phy_info_task);
2294}
2295
2296static void e1000_update_phy_info_task(struct work_struct *work)
2297{
2298 struct e1000_adapter *adapter = container_of(work,
2299 struct e1000_adapter,
2300 phy_info_task);
1dc32918 2301 struct e1000_hw *hw = &adapter->hw;
338c15e4
JB
2302
2303 rtnl_lock();
1dc32918 2304 e1000_phy_get_info(hw, &adapter->phy_info);
338c15e4 2305 rtnl_unlock();
1da177e4
LT
2306}
2307
2308/**
2309 * e1000_82547_tx_fifo_stall - Timer Call-back
2310 * @data: pointer to adapter cast into an unsigned long
2311 **/
64798845 2312static void e1000_82547_tx_fifo_stall(unsigned long data)
1da177e4 2313{
e982f17c 2314 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
5cf42fcd
JB
2315 schedule_work(&adapter->fifo_stall_task);
2316}
2317
2318/**
2319 * e1000_82547_tx_fifo_stall_task - task to complete work
2320 * @work: work struct contained inside adapter struct
2321 **/
2322static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2323{
2324 struct e1000_adapter *adapter = container_of(work,
2325 struct e1000_adapter,
2326 fifo_stall_task);
1dc32918 2327 struct e1000_hw *hw = &adapter->hw;
1da177e4 2328 struct net_device *netdev = adapter->netdev;
406874a7 2329 u32 tctl;
1da177e4 2330
338c15e4 2331 rtnl_lock();
96838a40 2332 if (atomic_read(&adapter->tx_fifo_stall)) {
1dc32918
JP
2333 if ((er32(TDT) == er32(TDH)) &&
2334 (er32(TDFT) == er32(TDFH)) &&
2335 (er32(TDFTS) == er32(TDFHS))) {
2336 tctl = er32(TCTL);
2337 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2338 ew32(TDFT, adapter->tx_head_addr);
2339 ew32(TDFH, adapter->tx_head_addr);
2340 ew32(TDFTS, adapter->tx_head_addr);
2341 ew32(TDFHS, adapter->tx_head_addr);
2342 ew32(TCTL, tctl);
2343 E1000_WRITE_FLUSH();
1da177e4
LT
2344
2345 adapter->tx_fifo_head = 0;
2346 atomic_set(&adapter->tx_fifo_stall, 0);
2347 netif_wake_queue(netdev);
baa34745 2348 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
1da177e4
LT
2349 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2350 }
2351 }
338c15e4 2352 rtnl_unlock();
1da177e4
LT
2353}
2354
b548192a 2355bool e1000_has_link(struct e1000_adapter *adapter)
be0f0719
JB
2356{
2357 struct e1000_hw *hw = &adapter->hw;
2358 bool link_active = false;
be0f0719
JB
2359
2360 /* get_link_status is set on LSC (link status) interrupt or
2361 * rx sequence error interrupt. get_link_status will stay
2362 * false until the e1000_check_for_link establishes link
2363 * for copper adapters ONLY
2364 */
2365 switch (hw->media_type) {
2366 case e1000_media_type_copper:
2367 if (hw->get_link_status) {
120a5d0d 2368 e1000_check_for_link(hw);
be0f0719
JB
2369 link_active = !hw->get_link_status;
2370 } else {
2371 link_active = true;
2372 }
2373 break;
2374 case e1000_media_type_fiber:
120a5d0d 2375 e1000_check_for_link(hw);
be0f0719
JB
2376 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2377 break;
2378 case e1000_media_type_internal_serdes:
120a5d0d 2379 e1000_check_for_link(hw);
be0f0719
JB
2380 link_active = hw->serdes_has_link;
2381 break;
2382 default:
2383 break;
2384 }
2385
2386 return link_active;
2387}
2388
1da177e4
LT
2389/**
2390 * e1000_watchdog - Timer Call-back
2391 * @data: pointer to adapter cast into an unsigned long
2392 **/
64798845 2393static void e1000_watchdog(unsigned long data)
1da177e4 2394{
e982f17c 2395 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
1dc32918 2396 struct e1000_hw *hw = &adapter->hw;
1da177e4 2397 struct net_device *netdev = adapter->netdev;
545c67c0 2398 struct e1000_tx_ring *txdr = adapter->tx_ring;
406874a7 2399 u32 link, tctl;
90fb5135 2400
be0f0719
JB
2401 link = e1000_has_link(adapter);
2402 if ((netif_carrier_ok(netdev)) && link)
2403 goto link_up;
1da177e4 2404
96838a40
JB
2405 if (link) {
2406 if (!netif_carrier_ok(netdev)) {
406874a7 2407 u32 ctrl;
c3033b01 2408 bool txb2b = true;
be0f0719 2409 /* update snapshot of PHY registers on LSC */
1dc32918 2410 e1000_get_speed_and_duplex(hw,
1da177e4
LT
2411 &adapter->link_speed,
2412 &adapter->link_duplex);
2413
1dc32918 2414 ctrl = er32(CTRL);
675ad473
ET
2415 pr_info("%s NIC Link is Up %d Mbps %s, "
2416 "Flow Control: %s\n",
2417 netdev->name,
2418 adapter->link_speed,
2419 adapter->link_duplex == FULL_DUPLEX ?
2420 "Full Duplex" : "Half Duplex",
2421 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2422 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2423 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2424 E1000_CTRL_TFCE) ? "TX" : "None")));
1da177e4 2425
39ca5f03 2426 /* adjust timeout factor according to speed/duplex */
66a2b0a3 2427 adapter->tx_timeout_factor = 1;
7e6c9861
JK
2428 switch (adapter->link_speed) {
2429 case SPEED_10:
c3033b01 2430 txb2b = false;
be0f0719 2431 adapter->tx_timeout_factor = 16;
7e6c9861
JK
2432 break;
2433 case SPEED_100:
c3033b01 2434 txb2b = false;
7e6c9861
JK
2435 /* maybe add some timeout factor ? */
2436 break;
2437 }
2438
1532ecea 2439 /* enable transmits in the hardware */
1dc32918 2440 tctl = er32(TCTL);
7e6c9861 2441 tctl |= E1000_TCTL_EN;
1dc32918 2442 ew32(TCTL, tctl);
66a2b0a3 2443
1da177e4 2444 netif_carrier_on(netdev);
baa34745
JB
2445 if (!test_bit(__E1000_DOWN, &adapter->flags))
2446 mod_timer(&adapter->phy_info_timer,
2447 round_jiffies(jiffies + 2 * HZ));
1da177e4
LT
2448 adapter->smartspeed = 0;
2449 }
2450 } else {
96838a40 2451 if (netif_carrier_ok(netdev)) {
1da177e4
LT
2452 adapter->link_speed = 0;
2453 adapter->link_duplex = 0;
675ad473
ET
2454 pr_info("%s NIC Link is Down\n",
2455 netdev->name);
1da177e4 2456 netif_carrier_off(netdev);
baa34745
JB
2457
2458 if (!test_bit(__E1000_DOWN, &adapter->flags))
2459 mod_timer(&adapter->phy_info_timer,
2460 round_jiffies(jiffies + 2 * HZ));
1da177e4
LT
2461 }
2462
2463 e1000_smartspeed(adapter);
2464 }
2465
be0f0719 2466link_up:
1da177e4
LT
2467 e1000_update_stats(adapter);
2468
1dc32918 2469 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
1da177e4 2470 adapter->tpt_old = adapter->stats.tpt;
1dc32918 2471 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
1da177e4
LT
2472 adapter->colc_old = adapter->stats.colc;
2473
2474 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2475 adapter->gorcl_old = adapter->stats.gorcl;
2476 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2477 adapter->gotcl_old = adapter->stats.gotcl;
2478
1dc32918 2479 e1000_update_adaptive(hw);
1da177e4 2480
f56799ea 2481 if (!netif_carrier_ok(netdev)) {
581d708e 2482 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1da177e4
LT
2483 /* We've lost link, so the controller stops DMA,
2484 * but we've got queued Tx work that's never going
2485 * to get done, so reset controller to flush Tx.
2486 * (Do the reset outside of interrupt context). */
87041639
JK
2487 adapter->tx_timeout_count++;
2488 schedule_work(&adapter->reset_task);
c2d5ab49
JB
2489 /* return immediately since reset is imminent */
2490 return;
1da177e4
LT
2491 }
2492 }
2493
eab2abf5
JB
2494 /* Simple mode for Interrupt Throttle Rate (ITR) */
2495 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2496 /*
2497 * Symmetric Tx/Rx gets a reduced ITR=2000;
2498 * Total asymmetrical Tx or Rx gets ITR=8000;
2499 * everyone else is between 2000-8000.
2500 */
2501 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2502 u32 dif = (adapter->gotcl > adapter->gorcl ?
2503 adapter->gotcl - adapter->gorcl :
2504 adapter->gorcl - adapter->gotcl) / 10000;
2505 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2506
2507 ew32(ITR, 1000000000 / (itr * 256));
2508 }
2509
1da177e4 2510 /* Cause software interrupt to ensure rx ring is cleaned */
1dc32918 2511 ew32(ICS, E1000_ICS_RXDMT0);
1da177e4 2512
2648345f 2513 /* Force detection of hung controller every watchdog period */
c3033b01 2514 adapter->detect_tx_hung = true;
1da177e4
LT
2515
2516 /* Reset the timer */
baa34745
JB
2517 if (!test_bit(__E1000_DOWN, &adapter->flags))
2518 mod_timer(&adapter->watchdog_timer,
2519 round_jiffies(jiffies + 2 * HZ));
1da177e4
LT
2520}
2521
835bb129
JB
2522enum latency_range {
2523 lowest_latency = 0,
2524 low_latency = 1,
2525 bulk_latency = 2,
2526 latency_invalid = 255
2527};
2528
2529/**
2530 * e1000_update_itr - update the dynamic ITR value based on statistics
8fce4731
JB
2531 * @adapter: pointer to adapter
2532 * @itr_setting: current adapter->itr
2533 * @packets: the number of packets during this measurement interval
2534 * @bytes: the number of bytes during this measurement interval
2535 *
835bb129
JB
2536 * Stores a new ITR value based on packets and byte
2537 * counts during the last interrupt. The advantage of per interrupt
2538 * computation is faster updates and more accurate ITR for the current
2539 * traffic pattern. Constants in this function were computed
2540 * based on theoretical maximum wire speed and thresholds were set based
2541 * on testing data as well as attempting to minimize response time
2542 * while increasing bulk throughput.
 2543 * This functionality is controlled by the InterruptThrottleRate module
2544 * parameter (see e1000_param.c)
835bb129
JB
2545 **/
2546static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
64798845 2547 u16 itr_setting, int packets, int bytes)
835bb129
JB
2548{
2549 unsigned int retval = itr_setting;
2550 struct e1000_hw *hw = &adapter->hw;
2551
2552 if (unlikely(hw->mac_type < e1000_82540))
2553 goto update_itr_done;
2554
2555 if (packets == 0)
2556 goto update_itr_done;
2557
835bb129
JB
2558 switch (itr_setting) {
2559 case lowest_latency:
2b65326e
JB
 2560 /* jumbo frames get bulk treatment */
2561 if (bytes/packets > 8000)
2562 retval = bulk_latency;
2563 else if ((packets < 5) && (bytes > 512))
835bb129
JB
2564 retval = low_latency;
2565 break;
2566 case low_latency: /* 50 usec aka 20000 ints/s */
2567 if (bytes > 10000) {
2b65326e
JB
2568 /* jumbo frames need bulk latency setting */
2569 if (bytes/packets > 8000)
2570 retval = bulk_latency;
2571 else if ((packets < 10) || ((bytes/packets) > 1200))
835bb129
JB
2572 retval = bulk_latency;
2573 else if ((packets > 35))
2574 retval = lowest_latency;
2b65326e
JB
2575 } else if (bytes/packets > 2000)
2576 retval = bulk_latency;
2577 else if (packets <= 2 && bytes < 512)
835bb129
JB
2578 retval = lowest_latency;
2579 break;
2580 case bulk_latency: /* 250 usec aka 4000 ints/s */
2581 if (bytes > 25000) {
2582 if (packets > 35)
2583 retval = low_latency;
2b65326e
JB
2584 } else if (bytes < 6000) {
2585 retval = low_latency;
835bb129
JB
2586 }
2587 break;
2588 }
2589
2590update_itr_done:
2591 return retval;
2592}
2593
2594static void e1000_set_itr(struct e1000_adapter *adapter)
2595{
2596 struct e1000_hw *hw = &adapter->hw;
406874a7
JP
2597 u16 current_itr;
2598 u32 new_itr = adapter->itr;
835bb129
JB
2599
2600 if (unlikely(hw->mac_type < e1000_82540))
2601 return;
2602
2603 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2604 if (unlikely(adapter->link_speed != SPEED_1000)) {
2605 current_itr = 0;
2606 new_itr = 4000;
2607 goto set_itr_now;
2608 }
2609
2610 adapter->tx_itr = e1000_update_itr(adapter,
2611 adapter->tx_itr,
2612 adapter->total_tx_packets,
2613 adapter->total_tx_bytes);
2b65326e
JB
2614 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2615 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2616 adapter->tx_itr = low_latency;
2617
835bb129
JB
2618 adapter->rx_itr = e1000_update_itr(adapter,
2619 adapter->rx_itr,
2620 adapter->total_rx_packets,
2621 adapter->total_rx_bytes);
2b65326e
JB
2622 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2623 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2624 adapter->rx_itr = low_latency;
835bb129
JB
2625
2626 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2627
835bb129
JB
2628 switch (current_itr) {
2629 /* counts and packets in update_itr are dependent on these numbers */
2630 case lowest_latency:
2631 new_itr = 70000;
2632 break;
2633 case low_latency:
2634 new_itr = 20000; /* aka hwitr = ~200 */
2635 break;
2636 case bulk_latency:
2637 new_itr = 4000;
2638 break;
2639 default:
2640 break;
2641 }
2642
2643set_itr_now:
2644 if (new_itr != adapter->itr) {
2645 /* this attempts to bias the interrupt rate towards Bulk
2646 * by adding intermediate steps when interrupt rate is
2647 * increasing */
2648 new_itr = new_itr > adapter->itr ?
2649 min(adapter->itr + (new_itr >> 2), new_itr) :
2650 new_itr;
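 /* when raising the rate, step up by at most a quarter of the
  * target per update; decreases take effect immediately */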
2651 adapter->itr = new_itr;
1dc32918 2652 ew32(ITR, 1000000000 / (new_itr * 256));
835bb129 2653 }
835bb129
JB
2654}
2655
1da177e4
LT
2656#define E1000_TX_FLAGS_CSUM 0x00000001
2657#define E1000_TX_FLAGS_VLAN 0x00000002
2658#define E1000_TX_FLAGS_TSO 0x00000004
2d7edb92 2659#define E1000_TX_FLAGS_IPV4 0x00000008
1da177e4
LT
2660#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2661#define E1000_TX_FLAGS_VLAN_SHIFT 16
2662
64798845
JP
2663static int e1000_tso(struct e1000_adapter *adapter,
2664 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
1da177e4 2665{
1da177e4 2666 struct e1000_context_desc *context_desc;
545c67c0 2667 struct e1000_buffer *buffer_info;
1da177e4 2668 unsigned int i;
406874a7
JP
2669 u32 cmd_length = 0;
2670 u16 ipcse = 0, tucse, mss;
2671 u8 ipcss, ipcso, tucss, tucso, hdr_len;
1da177e4
LT
2672 int err;
2673
89114afd 2674 if (skb_is_gso(skb)) {
1da177e4
LT
2675 if (skb_header_cloned(skb)) {
2676 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2677 if (err)
2678 return err;
2679 }
2680
ab6a5bb6 2681 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
7967168c 2682 mss = skb_shinfo(skb)->gso_size;
60828236 2683 if (skb->protocol == htons(ETH_P_IP)) {
eddc9ec5
ACM
2684 struct iphdr *iph = ip_hdr(skb);
2685 iph->tot_len = 0;
2686 iph->check = 0;
aa8223c7
ACM
2687 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2688 iph->daddr, 0,
2689 IPPROTO_TCP,
2690 0);
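 /* for TSO the hardware recomputes the IP total length and both
  * checksums per segment, so only the pseudo-header sum is seeded
  * here and the IP length/checksum fields are zeroed */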
2d7edb92 2691 cmd_length = E1000_TXD_CMD_IP;
ea2ae17d 2692 ipcse = skb_transport_offset(skb) - 1;
e15fdd03 2693 } else if (skb->protocol == htons(ETH_P_IPV6)) {
0660e03f 2694 ipv6_hdr(skb)->payload_len = 0;
aa8223c7 2695 tcp_hdr(skb)->check =
0660e03f
ACM
2696 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2697 &ipv6_hdr(skb)->daddr,
2698 0, IPPROTO_TCP, 0);
2d7edb92 2699 ipcse = 0;
2d7edb92 2700 }
bbe735e4 2701 ipcss = skb_network_offset(skb);
eddc9ec5 2702 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
ea2ae17d 2703 tucss = skb_transport_offset(skb);
aa8223c7 2704 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
1da177e4
LT
2705 tucse = 0;
2706
2707 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2d7edb92 2708 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
1da177e4 2709
581d708e
MC
2710 i = tx_ring->next_to_use;
2711 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
545c67c0 2712 buffer_info = &tx_ring->buffer_info[i];
1da177e4
LT
2713
2714 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2715 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2716 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2717 context_desc->upper_setup.tcp_fields.tucss = tucss;
2718 context_desc->upper_setup.tcp_fields.tucso = tucso;
2719 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2720 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2721 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2722 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2723
545c67c0 2724 buffer_info->time_stamp = jiffies;
a9ebadd6 2725 buffer_info->next_to_watch = i;
545c67c0 2726
581d708e
MC
2727 if (++i == tx_ring->count) i = 0;
2728 tx_ring->next_to_use = i;
1da177e4 2729
c3033b01 2730 return true;
1da177e4 2731 }
c3033b01 2732 return false;
1da177e4
LT
2733}
2734
64798845
JP
2735static bool e1000_tx_csum(struct e1000_adapter *adapter,
2736 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
1da177e4
LT
2737{
2738 struct e1000_context_desc *context_desc;
545c67c0 2739 struct e1000_buffer *buffer_info;
1da177e4 2740 unsigned int i;
406874a7 2741 u8 css;
3ed30676 2742 u32 cmd_len = E1000_TXD_CMD_DEXT;
1da177e4 2743
3ed30676
DG
2744 if (skb->ip_summed != CHECKSUM_PARTIAL)
2745 return false;
1da177e4 2746
3ed30676 2747 switch (skb->protocol) {
09640e63 2748 case cpu_to_be16(ETH_P_IP):
3ed30676
DG
2749 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2750 cmd_len |= E1000_TXD_CMD_TCP;
2751 break;
09640e63 2752 case cpu_to_be16(ETH_P_IPV6):
3ed30676
DG
2753 /* XXX not handling all IPV6 headers */
2754 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2755 cmd_len |= E1000_TXD_CMD_TCP;
2756 break;
2757 default:
2758 if (unlikely(net_ratelimit()))
feb8f478
ET
2759 e_warn(drv, "checksum_partial proto=%x!\n",
2760 skb->protocol);
3ed30676
DG
2761 break;
2762 }
1da177e4 2763
0d0b1672 2764 css = skb_checksum_start_offset(skb);
1da177e4 2765
3ed30676
DG
2766 i = tx_ring->next_to_use;
2767 buffer_info = &tx_ring->buffer_info[i];
2768 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
545c67c0 2769
3ed30676
DG
2770 context_desc->lower_setup.ip_config = 0;
2771 context_desc->upper_setup.tcp_fields.tucss = css;
2772 context_desc->upper_setup.tcp_fields.tucso =
2773 css + skb->csum_offset;
2774 context_desc->upper_setup.tcp_fields.tucse = 0;
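 /* tucss marks where hardware checksumming starts, tucso where the
  * result is inserted, and tucse = 0 means checksum through the end
  * of the packet */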
2775 context_desc->tcp_seg_setup.data = 0;
2776 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
1da177e4 2777
3ed30676
DG
2778 buffer_info->time_stamp = jiffies;
2779 buffer_info->next_to_watch = i;
1da177e4 2780
3ed30676
DG
2781 if (unlikely(++i == tx_ring->count)) i = 0;
2782 tx_ring->next_to_use = i;
2783
2784 return true;
1da177e4
LT
2785}
2786
2787#define E1000_MAX_TXD_PWR 12
2788#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2789
64798845
JP
2790static int e1000_tx_map(struct e1000_adapter *adapter,
2791 struct e1000_tx_ring *tx_ring,
2792 struct sk_buff *skb, unsigned int first,
2793 unsigned int max_per_txd, unsigned int nr_frags,
2794 unsigned int mss)
1da177e4 2795{
1dc32918 2796 struct e1000_hw *hw = &adapter->hw;
602c0554 2797 struct pci_dev *pdev = adapter->pdev;
37e73df8 2798 struct e1000_buffer *buffer_info;
d20b606c 2799 unsigned int len = skb_headlen(skb);
602c0554 2800 unsigned int offset = 0, size, count = 0, i;
1da177e4 2801 unsigned int f;
1da177e4
LT
2802
2803 i = tx_ring->next_to_use;
2804
96838a40 2805 while (len) {
37e73df8 2806 buffer_info = &tx_ring->buffer_info[i];
1da177e4 2807 size = min(len, max_per_txd);
fd803241
JK
2808 /* Workaround for Controller erratum --
 2809 * the descriptor for a non-TSO packet in a linear skb that follows a
 2810 * TSO packet gets written back prematurely before the data is fully
0f15a8fa 2811 * DMA'd to the controller */
fd803241 2812 if (!skb->data_len && tx_ring->last_tx_tso &&
89114afd 2813 !skb_is_gso(skb)) {
fd803241
JK
2814 tx_ring->last_tx_tso = 0;
2815 size -= 4;
2816 }
2817
1da177e4
LT
2818 /* Workaround for premature desc write-backs
2819 * in TSO mode. Append 4-byte sentinel desc */
96838a40 2820 if (unlikely(mss && !nr_frags && size == len && size > 8))
1da177e4 2821 size -= 4;
97338bde
MC
 2822 /* work-around for erratum 10, which applies
2823 * to all controllers in PCI-X mode
2824 * The fix is to make sure that the first descriptor of a
2825 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2826 */
1dc32918 2827 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
97338bde
MC
2828 (size > 2015) && count == 0))
2829 size = 2015;
96838a40 2830
1da177e4
LT
2831 /* Workaround for potential 82544 hang in PCI-X. Avoid
2832 * terminating buffers within evenly-aligned dwords. */
96838a40 2833 if (unlikely(adapter->pcix_82544 &&
1da177e4
LT
2834 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2835 size > 4))
2836 size -= 4;
2837
2838 buffer_info->length = size;
cdd7549e 2839 /* set time_stamp *before* dma to help avoid a possible race */
1da177e4 2840 buffer_info->time_stamp = jiffies;
602c0554 2841 buffer_info->mapped_as_page = false;
b16f53be
NN
2842 buffer_info->dma = dma_map_single(&pdev->dev,
2843 skb->data + offset,
2844 size, DMA_TO_DEVICE);
2845 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
602c0554 2846 goto dma_error;
a9ebadd6 2847 buffer_info->next_to_watch = i;
1da177e4
LT
2848
2849 len -= size;
2850 offset += size;
2851 count++;
37e73df8
AD
2852 if (len) {
2853 i++;
2854 if (unlikely(i == tx_ring->count))
2855 i = 0;
2856 }
1da177e4
LT
2857 }
2858
96838a40 2859 for (f = 0; f < nr_frags; f++) {
1da177e4
LT
2860 struct skb_frag_struct *frag;
2861
2862 frag = &skb_shinfo(skb)->frags[f];
2863 len = frag->size;
602c0554 2864 offset = frag->page_offset;
1da177e4 2865
96838a40 2866 while (len) {
37e73df8
AD
2867 i++;
2868 if (unlikely(i == tx_ring->count))
2869 i = 0;
2870
1da177e4
LT
2871 buffer_info = &tx_ring->buffer_info[i];
2872 size = min(len, max_per_txd);
1da177e4
LT
2873 /* Workaround for premature desc write-backs
2874 * in TSO mode. Append 4-byte sentinel desc */
96838a40 2875 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
1da177e4 2876 size -= 4;
1da177e4
LT
2877 /* Workaround for potential 82544 hang in PCI-X.
2878 * Avoid terminating buffers within evenly-aligned
2879 * dwords. */
96838a40 2880 if (unlikely(adapter->pcix_82544 &&
8fce4731
JB
2881 !((unsigned long)(page_to_phys(frag->page) + offset
2882 + size - 1) & 4) &&
2883 size > 4))
1da177e4
LT
2884 size -= 4;
2885
2886 buffer_info->length = size;
1da177e4 2887 buffer_info->time_stamp = jiffies;
602c0554 2888 buffer_info->mapped_as_page = true;
b16f53be 2889 buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
602c0554 2890 offset, size,
b16f53be
NN
2891 DMA_TO_DEVICE);
2892 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
602c0554 2893 goto dma_error;
a9ebadd6 2894 buffer_info->next_to_watch = i;
1da177e4
LT
2895
2896 len -= size;
2897 offset += size;
2898 count++;
1da177e4
LT
2899 }
2900 }
2901
1da177e4
LT
2902 tx_ring->buffer_info[i].skb = skb;
2903 tx_ring->buffer_info[first].next_to_watch = i;
2904
2905 return count;
602c0554
AD
2906
2907dma_error:
2908 dev_err(&pdev->dev, "TX DMA map failed\n");
2909 buffer_info->dma = 0;
c1fa347f 2910 if (count)
602c0554 2911 count--;
c1fa347f
RK
2912
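 /* walk backwards through the ring, wrapping from index 0 back to
  * the end, unmapping every buffer mapped before the failure */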
2913 while (count--) {
2914 if (i==0)
602c0554 2915 i += tx_ring->count;
c1fa347f 2916 i--;
602c0554
AD
2917 buffer_info = &tx_ring->buffer_info[i];
2918 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2919 }
2920
2921 return 0;
1da177e4
LT
2922}
2923
64798845
JP
2924static void e1000_tx_queue(struct e1000_adapter *adapter,
2925 struct e1000_tx_ring *tx_ring, int tx_flags,
2926 int count)
1da177e4 2927{
1dc32918 2928 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
2929 struct e1000_tx_desc *tx_desc = NULL;
2930 struct e1000_buffer *buffer_info;
406874a7 2931 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
1da177e4
LT
2932 unsigned int i;
2933
96838a40 2934 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
1da177e4
LT
2935 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2936 E1000_TXD_CMD_TSE;
2d7edb92
MC
2937 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2938
96838a40 2939 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2d7edb92 2940 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
1da177e4
LT
2941 }
2942
96838a40 2943 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
1da177e4
LT
2944 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2945 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2946 }
2947
96838a40 2948 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
1da177e4
LT
2949 txd_lower |= E1000_TXD_CMD_VLE;
2950 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2951 }
2952
2953 i = tx_ring->next_to_use;
2954
96838a40 2955 while (count--) {
1da177e4
LT
2956 buffer_info = &tx_ring->buffer_info[i];
2957 tx_desc = E1000_TX_DESC(*tx_ring, i);
2958 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2959 tx_desc->lower.data =
2960 cpu_to_le32(txd_lower | buffer_info->length);
2961 tx_desc->upper.data = cpu_to_le32(txd_upper);
96838a40 2962 if (unlikely(++i == tx_ring->count)) i = 0;
1da177e4
LT
2963 }
2964
2965 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
2966
2967 /* Force memory writes to complete before letting h/w
2968 * know there are new descriptors to fetch. (Only
2969 * applicable for weak-ordered memory model archs,
2970 * such as IA-64). */
2971 wmb();
2972
2973 tx_ring->next_to_use = i;
1dc32918 2974 writel(i, hw->hw_addr + tx_ring->tdt);
2ce9047f
JB
2975 /* we need this if more than one processor can write to our tail
 2976 * at a time, it synchronizes IO on IA64/Altix systems */
2977 mmiowb();
1da177e4
LT
2978}
2979
2980/**
2981 * 82547 workaround to avoid controller hang in half-duplex environment.
2982 * The workaround is to avoid queuing a large packet that would span
2983 * the internal Tx FIFO ring boundary by notifying the stack to resend
2984 * the packet at a later time. This gives the Tx FIFO an opportunity to
2985 * flush all packets. When that occurs, we reset the Tx FIFO pointers
2986 * to the beginning of the Tx FIFO.
2987 **/
2988
2989#define E1000_FIFO_HDR 0x10
2990#define E1000_82547_PAD_LEN 0x3E0
2991
64798845
JP
2992static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
2993 struct sk_buff *skb)
1da177e4 2994{
406874a7
JP
2995 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2996 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
1da177e4 2997
9099cfb9 2998 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
1da177e4 2999
96838a40 3000 if (adapter->link_duplex != HALF_DUPLEX)
1da177e4
LT
3001 goto no_fifo_stall_required;
3002
96838a40 3003 if (atomic_read(&adapter->tx_fifo_stall))
1da177e4
LT
3004 return 1;
3005
96838a40 3006 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
1da177e4
LT
3007 atomic_set(&adapter->tx_fifo_stall, 1);
3008 return 1;
3009 }
3010
3011no_fifo_stall_required:
3012 adapter->tx_fifo_head += skb_fifo_len;
96838a40 3013 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1da177e4
LT
3014 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3015 return 0;
3016}
3017
65c7973f
JB
3018static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3019{
3020 struct e1000_adapter *adapter = netdev_priv(netdev);
3021 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3022
3023 netif_stop_queue(netdev);
3024 /* Herbert's original patch had:
3025 * smp_mb__after_netif_stop_queue();
3026 * but since that doesn't exist yet, just open code it. */
3027 smp_mb();
3028
 3029 /* We need to check again in case another CPU has just
3030 * made room available. */
3031 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3032 return -EBUSY;
3033
3034 /* A reprieve! */
3035 netif_start_queue(netdev);
fcfb1224 3036 ++adapter->restart_queue;
65c7973f
JB
3037 return 0;
3038}
3039
3040static int e1000_maybe_stop_tx(struct net_device *netdev,
3041 struct e1000_tx_ring *tx_ring, int size)
3042{
3043 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3044 return 0;
3045 return __e1000_maybe_stop_tx(netdev, size);
3046}
3047
1da177e4 3048#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
3b29a56d
SH
3049static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3050 struct net_device *netdev)
1da177e4 3051{
60490fe0 3052 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 3053 struct e1000_hw *hw = &adapter->hw;
581d708e 3054 struct e1000_tx_ring *tx_ring;
1da177e4
LT
3055 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3056 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3057 unsigned int tx_flags = 0;
e743d313 3058 unsigned int len = skb_headlen(skb);
6d1e3aa7
KK
3059 unsigned int nr_frags;
3060 unsigned int mss;
1da177e4 3061 int count = 0;
76c224bc 3062 int tso;
1da177e4 3063 unsigned int f;
1da177e4 3064
65c7973f
JB
3065 /* This goes back to the question of how to logically map a tx queue
3066 * to a flow. Right now, performance is impacted slightly negatively
3067 * if using multiple tx queues. If the stack breaks away from a
3068 * single qdisc implementation, we can look at this again. */
581d708e 3069 tx_ring = adapter->tx_ring;
24025e4e 3070
581d708e 3071 if (unlikely(skb->len <= 0)) {
1da177e4
LT
3072 dev_kfree_skb_any(skb);
3073 return NETDEV_TX_OK;
3074 }
3075
7967168c 3076 mss = skb_shinfo(skb)->gso_size;
76c224bc 3077 /* The controller does a simple calculation to
1da177e4
LT
3078 * make sure there is enough room in the FIFO before
3079 * initiating the DMA for each buffer. The calc is:
3080 * 4 = ceil(buffer len/mss). To make sure we don't
3081 * overrun the FIFO, adjust the max buffer len if mss
3082 * drops. */
96838a40 3083 if (mss) {
406874a7 3084 u8 hdr_len;
1da177e4
LT
3085 max_per_txd = min(mss << 2, max_per_txd);
3086 max_txd_pwr = fls(max_per_txd) - 1;
9a3056da 3087
ab6a5bb6 3088 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
6d1e3aa7 3089 if (skb->data_len && hdr_len == len) {
1dc32918 3090 switch (hw->mac_type) {
9f687888 3091 unsigned int pull_size;
683a2aa3
HX
3092 case e1000_82544:
3093 /* Make sure we have room to chop off 4 bytes,
3094 * and that the end alignment will work out to
3095 * this hardware's requirements
 3097 * NOTE: this is a TSO-only workaround;
 3098 * if the end byte alignment is not correct it moves us
3098 * into the next dword */
27a884dc 3099 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
683a2aa3
HX
3100 break;
3101 /* fall through */
9f687888
JK
3102 pull_size = min((unsigned int)4, skb->data_len);
3103 if (!__pskb_pull_tail(skb, pull_size)) {
feb8f478
ET
3104 e_err(drv, "__pskb_pull_tail "
3105 "failed.\n");
9f687888 3106 dev_kfree_skb_any(skb);
749dfc70 3107 return NETDEV_TX_OK;
9f687888 3108 }
e743d313 3109 len = skb_headlen(skb);
9f687888
JK
3110 break;
3111 default:
3112 /* do nothing */
3113 break;
d74bbd3b 3114 }
9a3056da 3115 }
1da177e4
LT
3116 }
3117
9a3056da 3118 /* reserve a descriptor for the offload context */
84fa7933 3119 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
1da177e4 3120 count++;
2648345f 3121 count++;
fd803241 3122
fd803241 3123 /* Controller Erratum workaround */
89114afd 3124 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
fd803241 3125 count++;
fd803241 3126
1da177e4
LT
3127 count += TXD_USE_COUNT(len, max_txd_pwr);
3128
96838a40 3129 if (adapter->pcix_82544)
1da177e4
LT
3130 count++;
3131
96838a40 3132 /* work-around for erratum 10, which applies to all controllers
97338bde
MC
3133 * in PCI-X mode, so add one more descriptor to the count
3134 */
1dc32918 3135 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
97338bde
MC
3136 (len > 2015)))
3137 count++;
3138
1da177e4 3139 nr_frags = skb_shinfo(skb)->nr_frags;
96838a40 3140 for (f = 0; f < nr_frags; f++)
1da177e4
LT
3141 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3142 max_txd_pwr);
96838a40 3143 if (adapter->pcix_82544)
1da177e4
LT
3144 count += nr_frags;
3145
1da177e4
LT
3146 /* need: count + 2 desc gap to keep tail from touching
3147 * head, otherwise try next time */
8017943e 3148 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
1da177e4 3149 return NETDEV_TX_BUSY;
1da177e4 3150
1dc32918 3151 if (unlikely(hw->mac_type == e1000_82547)) {
96838a40 3152 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
1da177e4 3153 netif_stop_queue(netdev);
baa34745
JB
3154 if (!test_bit(__E1000_DOWN, &adapter->flags))
3155 mod_timer(&adapter->tx_fifo_stall_timer,
3156 jiffies + 1);
1da177e4
LT
3157 return NETDEV_TX_BUSY;
3158 }
3159 }
3160
eab6d18d 3161 if (unlikely(vlan_tx_tag_present(skb))) {
1da177e4
LT
3162 tx_flags |= E1000_TX_FLAGS_VLAN;
3163 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3164 }
3165
581d708e 3166 first = tx_ring->next_to_use;
96838a40 3167
581d708e 3168 tso = e1000_tso(adapter, tx_ring, skb);
1da177e4
LT
3169 if (tso < 0) {
3170 dev_kfree_skb_any(skb);
3171 return NETDEV_TX_OK;
3172 }
3173
fd803241 3174 if (likely(tso)) {
8fce4731
JB
3175 if (likely(hw->mac_type != e1000_82544))
3176 tx_ring->last_tx_tso = 1;
1da177e4 3177 tx_flags |= E1000_TX_FLAGS_TSO;
fd803241 3178 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
1da177e4
LT
3179 tx_flags |= E1000_TX_FLAGS_CSUM;
3180
60828236 3181 if (likely(skb->protocol == htons(ETH_P_IP)))
2d7edb92
MC
3182 tx_flags |= E1000_TX_FLAGS_IPV4;
3183
37e73df8
AD
3184 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3185 nr_frags, mss);
1da177e4 3186
37e73df8
AD
3187 if (count) {
3188 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
37e73df8
AD
3189 /* Make sure there is space in the ring for the next send. */
3190 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
1da177e4 3191
37e73df8
AD
3192 } else {
3193 dev_kfree_skb_any(skb);
3194 tx_ring->buffer_info[first].time_stamp = 0;
3195 tx_ring->next_to_use = first;
3196 }
1da177e4 3197
1da177e4
LT
3198 return NETDEV_TX_OK;
3199}
3200
3201/**
3202 * e1000_tx_timeout - Respond to a Tx Hang
3203 * @netdev: network interface device structure
3204 **/
3205
64798845 3206static void e1000_tx_timeout(struct net_device *netdev)
1da177e4 3207{
60490fe0 3208 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4
LT
3209
3210 /* Do the reset outside of interrupt context */
87041639
JK
3211 adapter->tx_timeout_count++;
3212 schedule_work(&adapter->reset_task);
1da177e4
LT
3213}
3214
64798845 3215static void e1000_reset_task(struct work_struct *work)
1da177e4 3216{
65f27f38
DH
3217 struct e1000_adapter *adapter =
3218 container_of(work, struct e1000_adapter, reset_task);
1da177e4 3219
338c15e4 3220 e1000_reinit_safe(adapter);
1da177e4
LT
3221}
3222
3223/**
3224 * e1000_get_stats - Get System Network Statistics
3225 * @netdev: network interface device structure
3226 *
3227 * Returns the address of the device statistics structure.
3228 * The statistics are actually updated from the timer callback.
3229 **/
3230
64798845 3231static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
1da177e4 3232{
6b7660cd 3233 /* only return the current stats */
5fe31def 3234 return &netdev->stats;
1da177e4
LT
3235}
3236
3237/**
3238 * e1000_change_mtu - Change the Maximum Transfer Unit
3239 * @netdev: network interface device structure
3240 * @new_mtu: new value for maximum frame size
3241 *
3242 * Returns 0 on success, negative on failure
3243 **/
3244
64798845 3245static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
1da177e4 3246{
60490fe0 3247 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 3248 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
3249 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
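 /* e.g. the standard 1500-byte MTU gives a max_frame of 1518:
 * 14 bytes of Ethernet header plus the 4-byte FCS. */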
3250
96838a40
JB
3251 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3252 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
feb8f478 3253 e_err(probe, "Invalid MTU setting\n");
1da177e4 3254 return -EINVAL;
2d7edb92 3255 }
1da177e4 3256
997f5cbd 3257 /* Adapter-specific max frame size limits. */
1dc32918 3258 switch (hw->mac_type) {
9e2feace 3259 case e1000_undefined ... e1000_82542_rev2_1:
b7cb8c2c 3260 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
feb8f478 3261 e_err(probe, "Jumbo Frames not supported.\n");
2d7edb92 3262 return -EINVAL;
2d7edb92 3263 }
997f5cbd 3264 break;
997f5cbd
JK
3265 default:
3266 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3267 break;
1da177e4
LT
3268 }
3269
3d6114e7
JB
3270 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3271 msleep(1);
3272 /* e1000_down has a dependency on max_frame_size */
3273 hw->max_frame_size = max_frame;
3274 if (netif_running(netdev))
3275 e1000_down(adapter);
3276
87f5032e 3277 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
9e2feace 3278 * means we reserve 2 more; this pushes us to allocate from the next
edbbb3ca
JB
3279 * larger slab size.
3280 * i.e. RXBUFFER_2048 --> size-4096 slab
3281 * however with the new *_jumbo_rx* routines, jumbo receives will use
3282 * fragmented skbs */
9e2feace 3283
9926146b 3284 if (max_frame <= E1000_RXBUFFER_2048)
9e2feace 3285 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
edbbb3ca
JB
3286 else
3287#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
9e2feace 3288 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
edbbb3ca
JB
3289#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3290 adapter->rx_buffer_len = PAGE_SIZE;
3291#endif
9e2feace
AK
3292
3293 /* adjust allocation if LPE protects us, and we aren't using SBP */
1dc32918 3294 if (!hw->tbi_compatibility_on &&
b7cb8c2c 3295 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
9e2feace
AK
3296 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3297 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
997f5cbd 3298
675ad473
ET
3299 pr_info("%s changing MTU from %d to %d\n",
3300 netdev->name, netdev->mtu, new_mtu);
2d7edb92
MC
3301 netdev->mtu = new_mtu;
3302
2db10a08 3303 if (netif_running(netdev))
3d6114e7
JB
3304 e1000_up(adapter);
3305 else
3306 e1000_reset(adapter);
3307
3308 clear_bit(__E1000_RESETTING, &adapter->flags);
1da177e4 3309
1da177e4
LT
3310 return 0;
3311}
3312
3313/**
3314 * e1000_update_stats - Update the board statistics counters
3315 * @adapter: board private structure
3316 **/
3317
64798845 3318void e1000_update_stats(struct e1000_adapter *adapter)
1da177e4 3319{
5fe31def 3320 struct net_device *netdev = adapter->netdev;
1da177e4 3321 struct e1000_hw *hw = &adapter->hw;
282f33c9 3322 struct pci_dev *pdev = adapter->pdev;
1da177e4 3323 unsigned long flags;
406874a7 3324 u16 phy_tmp;
1da177e4
LT
3325
3326#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3327
282f33c9
LV
3328 /*
3329 * Prevent stats update while adapter is being reset, or if the pci
3330 * connection is down.
3331 */
9026729b 3332 if (adapter->link_speed == 0)
282f33c9 3333 return;
81b1955e 3334 if (pci_channel_offline(pdev))
9026729b
AK
3335 return;
3336
1da177e4
LT
3337 spin_lock_irqsave(&adapter->stats_lock, flags);
3338
828d055f 3339 /* these counters are modified from e1000_tbi_adjust_stats,
1da177e4
LT
3340 * called from the interrupt context, so they must only
3341 * be written while holding adapter->stats_lock
3342 */
3343
1dc32918
JP
3344 adapter->stats.crcerrs += er32(CRCERRS);
3345 adapter->stats.gprc += er32(GPRC);
3346 adapter->stats.gorcl += er32(GORCL);
3347 adapter->stats.gorch += er32(GORCH);
3348 adapter->stats.bprc += er32(BPRC);
3349 adapter->stats.mprc += er32(MPRC);
3350 adapter->stats.roc += er32(ROC);
3351
1532ecea
JB
3352 adapter->stats.prc64 += er32(PRC64);
3353 adapter->stats.prc127 += er32(PRC127);
3354 adapter->stats.prc255 += er32(PRC255);
3355 adapter->stats.prc511 += er32(PRC511);
3356 adapter->stats.prc1023 += er32(PRC1023);
3357 adapter->stats.prc1522 += er32(PRC1522);
1dc32918
JP
3358
3359 adapter->stats.symerrs += er32(SYMERRS);
3360 adapter->stats.mpc += er32(MPC);
3361 adapter->stats.scc += er32(SCC);
3362 adapter->stats.ecol += er32(ECOL);
3363 adapter->stats.mcc += er32(MCC);
3364 adapter->stats.latecol += er32(LATECOL);
3365 adapter->stats.dc += er32(DC);
3366 adapter->stats.sec += er32(SEC);
3367 adapter->stats.rlec += er32(RLEC);
3368 adapter->stats.xonrxc += er32(XONRXC);
3369 adapter->stats.xontxc += er32(XONTXC);
3370 adapter->stats.xoffrxc += er32(XOFFRXC);
3371 adapter->stats.xofftxc += er32(XOFFTXC);
3372 adapter->stats.fcruc += er32(FCRUC);
3373 adapter->stats.gptc += er32(GPTC);
3374 adapter->stats.gotcl += er32(GOTCL);
3375 adapter->stats.gotch += er32(GOTCH);
3376 adapter->stats.rnbc += er32(RNBC);
3377 adapter->stats.ruc += er32(RUC);
3378 adapter->stats.rfc += er32(RFC);
3379 adapter->stats.rjc += er32(RJC);
3380 adapter->stats.torl += er32(TORL);
3381 adapter->stats.torh += er32(TORH);
3382 adapter->stats.totl += er32(TOTL);
3383 adapter->stats.toth += er32(TOTH);
3384 adapter->stats.tpr += er32(TPR);
3385
1532ecea
JB
3386 adapter->stats.ptc64 += er32(PTC64);
3387 adapter->stats.ptc127 += er32(PTC127);
3388 adapter->stats.ptc255 += er32(PTC255);
3389 adapter->stats.ptc511 += er32(PTC511);
3390 adapter->stats.ptc1023 += er32(PTC1023);
3391 adapter->stats.ptc1522 += er32(PTC1522);
1dc32918
JP
3392
3393 adapter->stats.mptc += er32(MPTC);
3394 adapter->stats.bptc += er32(BPTC);
1da177e4
LT
3395
3396 /* used for adaptive IFS */
3397
1dc32918 3398 hw->tx_packet_delta = er32(TPT);
1da177e4 3399 adapter->stats.tpt += hw->tx_packet_delta;
1dc32918 3400 hw->collision_delta = er32(COLC);
1da177e4
LT
3401 adapter->stats.colc += hw->collision_delta;
3402
96838a40 3403 if (hw->mac_type >= e1000_82543) {
1dc32918
JP
3404 adapter->stats.algnerrc += er32(ALGNERRC);
3405 adapter->stats.rxerrc += er32(RXERRC);
3406 adapter->stats.tncrs += er32(TNCRS);
3407 adapter->stats.cexterr += er32(CEXTERR);
3408 adapter->stats.tsctc += er32(TSCTC);
3409 adapter->stats.tsctfc += er32(TSCTFC);
1da177e4
LT
3410 }
3411
3412 /* Fill out the OS statistics structure */
5fe31def
AK
3413 netdev->stats.multicast = adapter->stats.mprc;
3414 netdev->stats.collisions = adapter->stats.colc;
1da177e4
LT
3415
3416 /* Rx Errors */
3417
87041639
JK
3418 /* RLEC on some newer hardware can be incorrect so build
3419 * our own version based on RUC and ROC */
5fe31def 3420 netdev->stats.rx_errors = adapter->stats.rxerrc +
1da177e4 3421 adapter->stats.crcerrs + adapter->stats.algnerrc +
87041639
JK
3422 adapter->stats.ruc + adapter->stats.roc +
3423 adapter->stats.cexterr;
49559854 3424 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
5fe31def
AK
3425 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3426 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3427 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3428 netdev->stats.rx_missed_errors = adapter->stats.mpc;
1da177e4
LT
3429
3430 /* Tx Errors */
49559854 3431 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
5fe31def
AK
3432 netdev->stats.tx_errors = adapter->stats.txerrc;
3433 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3434 netdev->stats.tx_window_errors = adapter->stats.latecol;
3435 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
1dc32918 3436 if (hw->bad_tx_carr_stats_fd &&
167fb284 3437 adapter->link_duplex == FULL_DUPLEX) {
5fe31def 3438 netdev->stats.tx_carrier_errors = 0;
167fb284
JG
3439 adapter->stats.tncrs = 0;
3440 }
1da177e4
LT
3441
3442 /* Tx Dropped needs to be maintained elsewhere */
3443
3444 /* Phy Stats */
96838a40
JB
3445 if (hw->media_type == e1000_media_type_copper) {
3446 if ((adapter->link_speed == SPEED_1000) &&
1da177e4
LT
3447 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3448 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3449 adapter->phy_stats.idle_errors += phy_tmp;
3450 }
3451
96838a40 3452 if ((hw->mac_type <= e1000_82546) &&
1da177e4
LT
3453 (hw->phy_type == e1000_phy_m88) &&
3454 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3455 adapter->phy_stats.receive_errors += phy_tmp;
3456 }
3457
15e376b4 3458 /* Management Stats */
1dc32918
JP
3459 if (hw->has_smbus) {
3460 adapter->stats.mgptc += er32(MGTPTC);
3461 adapter->stats.mgprc += er32(MGTPRC);
3462 adapter->stats.mgpdc += er32(MGTPDC);
15e376b4
JG
3463 }
3464
1da177e4
LT
3465 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3466}
9ac98284 3467
1da177e4
LT
3468/**
3469 * e1000_intr - Interrupt Handler
3470 * @irq: interrupt number
3471 * @data: pointer to a network interface device structure
1da177e4
LT
3472 **/
3473
64798845 3474static irqreturn_t e1000_intr(int irq, void *data)
1da177e4
LT
3475{
3476 struct net_device *netdev = data;
60490fe0 3477 struct e1000_adapter *adapter = netdev_priv(netdev);
1da177e4 3478 struct e1000_hw *hw = &adapter->hw;
1532ecea 3479 u32 icr = er32(ICR);
c3570acb 3480
4c11b8ad 3481 if (unlikely((!icr)))
835bb129
JB
3482 return IRQ_NONE; /* Not our interrupt */
3483
4c11b8ad
JB
3484 /*
3485 * we might have caused the interrupt, but the above
3486 * read cleared it, and just in case the driver is
3487 * down there is nothing to do so return handled
3488 */
3489 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3490 return IRQ_HANDLED;
3491
96838a40 3492 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
1da177e4 3493 hw->get_link_status = 1;
1314bbf3
AK
3494 /* guard against interrupt when we're going down */
3495 if (!test_bit(__E1000_DOWN, &adapter->flags))
3496 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1da177e4
LT
3497 }
3498
1532ecea
JB
3499 /* disable interrupts, without the synchronize_irq bit */
3500 ew32(IMC, ~0);
3501 E1000_WRITE_FLUSH();
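 /* Interrupts stay masked from here on; e1000_clean() re-enables them
 * via e1000_irq_enable() once the NAPI budget is not exhausted. */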
3502
288379f0 3503 if (likely(napi_schedule_prep(&adapter->napi))) {
835bb129
JB
3504 adapter->total_tx_bytes = 0;
3505 adapter->total_tx_packets = 0;
3506 adapter->total_rx_bytes = 0;
3507 adapter->total_rx_packets = 0;
288379f0 3508 __napi_schedule(&adapter->napi);
a6c42322 3509 } else {
90fb5135
AK
3510 /* this really should not happen! if it does it is basically a
3511 * bug, but not a hard error, so enable ints and continue */
a6c42322
JB
3512 if (!test_bit(__E1000_DOWN, &adapter->flags))
3513 e1000_irq_enable(adapter);
3514 }
1da177e4 3515
1da177e4
LT
3516 return IRQ_HANDLED;
3517}
3518
1da177e4
LT
3519/**
3520 * e1000_clean - NAPI Rx polling callback
3521 * @adapter: board private structure
3522 **/
64798845 3523static int e1000_clean(struct napi_struct *napi, int budget)
1da177e4 3524{
bea3348e 3525 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
650b5a5c 3526 int tx_clean_complete = 0, work_done = 0;
581d708e 3527
650b5a5c 3528 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
581d708e 3529
650b5a5c 3530 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
581d708e 3531
650b5a5c 3532 if (!tx_clean_complete)
d2c7ddd6
DM
3533 work_done = budget;
3534
53e52c72
DM
3535 /* If budget not fully consumed, exit the polling mode */
3536 if (work_done < budget) {
835bb129
JB
3537 if (likely(adapter->itr_setting & 3))
3538 e1000_set_itr(adapter);
288379f0 3539 napi_complete(napi);
a6c42322
JB
3540 if (!test_bit(__E1000_DOWN, &adapter->flags))
3541 e1000_irq_enable(adapter);
1da177e4
LT
3542 }
3543
bea3348e 3544 return work_done;
1da177e4
LT
3545}
3546
1da177e4
LT
3547/**
3548 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3549 * @adapter: board private structure
3550 **/
64798845
JP
3551static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3552 struct e1000_tx_ring *tx_ring)
1da177e4 3553{
1dc32918 3554 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
3555 struct net_device *netdev = adapter->netdev;
3556 struct e1000_tx_desc *tx_desc, *eop_desc;
3557 struct e1000_buffer *buffer_info;
3558 unsigned int i, eop;
2a1af5d7 3559 unsigned int count = 0;
835bb129 3560 unsigned int total_tx_bytes=0, total_tx_packets=0;
1da177e4
LT
3561
3562 i = tx_ring->next_to_clean;
3563 eop = tx_ring->buffer_info[i].next_to_watch;
3564 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3565
ccfb342c
AD
3566 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3567 (count < tx_ring->count)) {
843f4267 3568 bool cleaned = false;
2d0bb1c1 3569 rmb(); /* read buffer_info after eop_desc */
843f4267 3570 for ( ; !cleaned; count++) {
1da177e4
LT
3571 tx_desc = E1000_TX_DESC(*tx_ring, i);
3572 buffer_info = &tx_ring->buffer_info[i];
3573 cleaned = (i == eop);
3574
835bb129 3575 if (cleaned) {
2b65326e 3576 struct sk_buff *skb = buffer_info->skb;
7753b171
JB
3577 unsigned int segs, bytecount;
3578 segs = skb_shinfo(skb)->gso_segs ?: 1;
3579 /* multiply data chunks by size of headers */
3580 bytecount = ((segs - 1) * skb_headlen(skb)) +
3581 skb->len;
2b65326e 3582 total_tx_packets += segs;
7753b171 3583 total_tx_bytes += bytecount;
835bb129 3584 }
fd803241 3585 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
a9ebadd6 3586 tx_desc->upper.data = 0;
1da177e4 3587
96838a40 3588 if (unlikely(++i == tx_ring->count)) i = 0;
1da177e4 3589 }
581d708e 3590
1da177e4
LT
3591 eop = tx_ring->buffer_info[i].next_to_watch;
3592 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3593 }
3594
3595 tx_ring->next_to_clean = i;
3596
77b2aad5 3597#define TX_WAKE_THRESHOLD 32
843f4267 3598 if (unlikely(count && netif_carrier_ok(netdev) &&
65c7973f
JB
3599 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3600 /* Make sure that anybody stopping the queue after this
3601 * sees the new next_to_clean.
3602 */
3603 smp_mb();
cdd7549e
JB
3604
3605 if (netif_queue_stopped(netdev) &&
3606 !(test_bit(__E1000_DOWN, &adapter->flags))) {
77b2aad5 3607 netif_wake_queue(netdev);
fcfb1224
JB
3608 ++adapter->restart_queue;
3609 }
77b2aad5 3610 }
2648345f 3611
581d708e 3612 if (adapter->detect_tx_hung) {
2648345f 3613 /* Detect a transmit hang in hardware; this serializes the
1da177e4 3614 * check with the clearing of time_stamp and the movement of i */
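 /* A hang is only reported if the oldest pending descriptor has aged
 * past the timeout and the transmitter is not merely paused by flow
 * control (STATUS.TXOFF clear). */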
c3033b01 3615 adapter->detect_tx_hung = false;
cdd7549e
JB
3616 if (tx_ring->buffer_info[eop].time_stamp &&
3617 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
8e95a202
JP
3618 (adapter->tx_timeout_factor * HZ)) &&
3619 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
70b8f1e1
MC
3620
3621 /* detected Tx unit hang */
feb8f478 3622 e_err(drv, "Detected Tx Unit Hang\n"
675ad473
ET
3623 " Tx Queue <%lu>\n"
3624 " TDH <%x>\n"
3625 " TDT <%x>\n"
3626 " next_to_use <%x>\n"
3627 " next_to_clean <%x>\n"
3628 "buffer_info[next_to_clean]\n"
3629 " time_stamp <%lx>\n"
3630 " next_to_watch <%x>\n"
3631 " jiffies <%lx>\n"
3632 " next_to_watch.status <%x>\n",
7bfa4816
JK
3633 (unsigned long)((tx_ring - adapter->tx_ring) /
3634 sizeof(struct e1000_tx_ring)),
1dc32918
JP
3635 readl(hw->hw_addr + tx_ring->tdh),
3636 readl(hw->hw_addr + tx_ring->tdt),
70b8f1e1 3637 tx_ring->next_to_use,
392137fa 3638 tx_ring->next_to_clean,
cdd7549e 3639 tx_ring->buffer_info[eop].time_stamp,
70b8f1e1
MC
3640 eop,
3641 jiffies,
3642 eop_desc->upper.fields.status);
1da177e4 3643 netif_stop_queue(netdev);
70b8f1e1 3644 }
1da177e4 3645 }
835bb129
JB
3646 adapter->total_tx_bytes += total_tx_bytes;
3647 adapter->total_tx_packets += total_tx_packets;
5fe31def
AK
3648 netdev->stats.tx_bytes += total_tx_bytes;
3649 netdev->stats.tx_packets += total_tx_packets;
807540ba 3650 return count < tx_ring->count;
1da177e4
LT
3651}
3652
3653/**
3654 * e1000_rx_checksum - Receive Checksum Offload for 82543
2d7edb92
MC
3655 * @adapter: board private structure
3656 * @status_err: receive descriptor status and error fields
3657 * @csum: receive descriptor csum field
3658 * @sk_buff: socket buffer with received data
1da177e4
LT
3659 **/
3660
64798845
JP
3661static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3662 u32 csum, struct sk_buff *skb)
1da177e4 3663{
1dc32918 3664 struct e1000_hw *hw = &adapter->hw;
406874a7
JP
3665 u16 status = (u16)status_err;
3666 u8 errors = (u8)(status_err >> 24);
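 /* Callers pack the descriptor status field into the low 16 bits and
 * the errors byte into bits 31:24 of status_err; see the
 * e1000_rx_checksum() calls in the Rx clean routines. */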
bc8acf2c
ED
3667
3668 skb_checksum_none_assert(skb);
2d7edb92 3669
1da177e4 3670 /* 82543 or newer only */
1dc32918 3671 if (unlikely(hw->mac_type < e1000_82543)) return;
1da177e4 3672 /* Ignore Checksum bit is set */
96838a40 3673 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
2d7edb92 3674 /* TCP/UDP checksum error bit is set */
96838a40 3675 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
1da177e4 3676 /* let the stack verify checksum errors */
1da177e4 3677 adapter->hw_csum_err++;
2d7edb92
MC
3678 return;
3679 }
3680 /* TCP/UDP Checksum has not been calculated */
1532ecea
JB
3681 if (!(status & E1000_RXD_STAT_TCPCS))
3682 return;
3683
2d7edb92
MC
3684 /* It must be a TCP or UDP packet with a valid checksum */
3685 if (likely(status & E1000_RXD_STAT_TCPCS)) {
1da177e4
LT
3686 /* TCP checksum is good */
3687 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 3688 }
2d7edb92 3689 adapter->hw_csum_good++;
1da177e4
LT
3690}
3691
edbbb3ca
JB
3692/**
 3693 * e1000_consume_page - helper to account a used page fragment to the skb
3694 **/
3695static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3696 u16 length)
3697{
3698 bi->page = NULL;
3699 skb->len += length;
3700 skb->data_len += length;
3701 skb->truesize += length;
3702}
3703
3704/**
3705 * e1000_receive_skb - helper function to handle rx indications
3706 * @adapter: board private structure
3707 * @status: descriptor status field as written by hardware
3708 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3709 * @skb: pointer to sk_buff to be indicated to stack
3710 */
3711static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3712 __le16 vlan, struct sk_buff *skb)
3713{
6a08d194
JB
3714 skb->protocol = eth_type_trans(skb, adapter->netdev);
3715
3716 if ((unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))))
3717 vlan_gro_receive(&adapter->napi, adapter->vlgrp,
3718 le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK,
3719 skb);
3720 else
3721 napi_gro_receive(&adapter->napi, skb);
edbbb3ca
JB
3722}
3723
3724/**
3725 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
3726 * @adapter: board private structure
3727 * @rx_ring: ring to clean
3728 * @work_done: amount of napi work completed this call
3729 * @work_to_do: max amount of work allowed for this call to do
3730 *
 3731 * the return value indicates whether actual cleaning was done; there
 3732 * is no guarantee that everything was cleaned
3733 */
3734static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3735 struct e1000_rx_ring *rx_ring,
3736 int *work_done, int work_to_do)
3737{
3738 struct e1000_hw *hw = &adapter->hw;
3739 struct net_device *netdev = adapter->netdev;
3740 struct pci_dev *pdev = adapter->pdev;
3741 struct e1000_rx_desc *rx_desc, *next_rxd;
3742 struct e1000_buffer *buffer_info, *next_buffer;
3743 unsigned long irq_flags;
3744 u32 length;
3745 unsigned int i;
3746 int cleaned_count = 0;
3747 bool cleaned = false;
3748 unsigned int total_rx_bytes=0, total_rx_packets=0;
3749
3750 i = rx_ring->next_to_clean;
3751 rx_desc = E1000_RX_DESC(*rx_ring, i);
3752 buffer_info = &rx_ring->buffer_info[i];
3753
3754 while (rx_desc->status & E1000_RXD_STAT_DD) {
3755 struct sk_buff *skb;
3756 u8 status;
3757
3758 if (*work_done >= work_to_do)
3759 break;
3760 (*work_done)++;
2d0bb1c1 3761 rmb(); /* read descriptor and rx_buffer_info after status DD */
edbbb3ca
JB
3762
3763 status = rx_desc->status;
3764 skb = buffer_info->skb;
3765 buffer_info->skb = NULL;
3766
3767 if (++i == rx_ring->count) i = 0;
3768 next_rxd = E1000_RX_DESC(*rx_ring, i);
3769 prefetch(next_rxd);
3770
3771 next_buffer = &rx_ring->buffer_info[i];
3772
3773 cleaned = true;
3774 cleaned_count++;
b16f53be
NN
3775 dma_unmap_page(&pdev->dev, buffer_info->dma,
3776 buffer_info->length, DMA_FROM_DEVICE);
edbbb3ca
JB
3777 buffer_info->dma = 0;
3778
3779 length = le16_to_cpu(rx_desc->length);
3780
3781 /* errors is only valid for DD + EOP descriptors */
3782 if (unlikely((status & E1000_RXD_STAT_EOP) &&
3783 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
3784 u8 last_byte = *(skb->data + length - 1);
3785 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
3786 last_byte)) {
3787 spin_lock_irqsave(&adapter->stats_lock,
3788 irq_flags);
3789 e1000_tbi_adjust_stats(hw, &adapter->stats,
3790 length, skb->data);
3791 spin_unlock_irqrestore(&adapter->stats_lock,
3792 irq_flags);
3793 length--;
3794 } else {
3795 /* recycle both page and skb */
3796 buffer_info->skb = skb;
3797 /* an error means any chain goes out the window
3798 * too */
3799 if (rx_ring->rx_skb_top)
3800 dev_kfree_skb(rx_ring->rx_skb_top);
3801 rx_ring->rx_skb_top = NULL;
3802 goto next_desc;
3803 }
3804 }
3805
3806#define rxtop rx_ring->rx_skb_top
3807 if (!(status & E1000_RXD_STAT_EOP)) {
3808 /* this descriptor is only the beginning (or middle) */
3809 if (!rxtop) {
3810 /* this is the beginning of a chain */
3811 rxtop = skb;
3812 skb_fill_page_desc(rxtop, 0, buffer_info->page,
3813 0, length);
3814 } else {
3815 /* this is the middle of a chain */
3816 skb_fill_page_desc(rxtop,
3817 skb_shinfo(rxtop)->nr_frags,
3818 buffer_info->page, 0, length);
3819 /* re-use the skb, only consumed the page */
3820 buffer_info->skb = skb;
3821 }
3822 e1000_consume_page(buffer_info, rxtop, length);
3823 goto next_desc;
3824 } else {
3825 if (rxtop) {
3826 /* end of the chain */
3827 skb_fill_page_desc(rxtop,
3828 skb_shinfo(rxtop)->nr_frags,
3829 buffer_info->page, 0, length);
3830 /* re-use the current skb, we only consumed the
3831 * page */
3832 buffer_info->skb = skb;
3833 skb = rxtop;
3834 rxtop = NULL;
3835 e1000_consume_page(buffer_info, skb, length);
3836 } else {
 3837 /* no chain, got EOP, this buf is the whole packet;
 3838 * use copybreak to save a put_page/alloc_page cycle */
3839 if (length <= copybreak &&
3840 skb_tailroom(skb) >= length) {
3841 u8 *vaddr;
3842 vaddr = kmap_atomic(buffer_info->page,
3843 KM_SKB_DATA_SOFTIRQ);
3844 memcpy(skb_tail_pointer(skb), vaddr, length);
3845 kunmap_atomic(vaddr,
3846 KM_SKB_DATA_SOFTIRQ);
3847 /* re-use the page, so don't erase
3848 * buffer_info->page */
3849 skb_put(skb, length);
3850 } else {
3851 skb_fill_page_desc(skb, 0,
3852 buffer_info->page, 0,
3853 length);
3854 e1000_consume_page(buffer_info, skb,
3855 length);
3856 }
3857 }
3858 }
3859
3860 /* Receive Checksum Offload XXX recompute due to CRC strip? */
3861 e1000_rx_checksum(adapter,
3862 (u32)(status) |
3863 ((u32)(rx_desc->errors) << 24),
3864 le16_to_cpu(rx_desc->csum), skb);
3865
3866 pskb_trim(skb, skb->len - 4);
3867
3868 /* probably a little skewed due to removing CRC */
3869 total_rx_bytes += skb->len;
3870 total_rx_packets++;
3871
3872 /* eth type trans needs skb->data to point to something */
3873 if (!pskb_may_pull(skb, ETH_HLEN)) {
feb8f478 3874 e_err(drv, "pskb_may_pull failed.\n");
edbbb3ca
JB
3875 dev_kfree_skb(skb);
3876 goto next_desc;
3877 }
3878
edbbb3ca
JB
3879 e1000_receive_skb(adapter, status, rx_desc->special, skb);
3880
3881next_desc:
3882 rx_desc->status = 0;
3883
3884 /* return some buffers to hardware, one at a time is too slow */
3885 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3886 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3887 cleaned_count = 0;
3888 }
3889
3890 /* use prefetched values */
3891 rx_desc = next_rxd;
3892 buffer_info = next_buffer;
3893 }
3894 rx_ring->next_to_clean = i;
3895
3896 cleaned_count = E1000_DESC_UNUSED(rx_ring);
3897 if (cleaned_count)
3898 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3899
3900 adapter->total_rx_packets += total_rx_packets;
3901 adapter->total_rx_bytes += total_rx_bytes;
5fe31def
AK
3902 netdev->stats.rx_bytes += total_rx_bytes;
3903 netdev->stats.rx_packets += total_rx_packets;
edbbb3ca
JB
3904 return cleaned;
3905}
3906
57bf6eef
JP
3907/*
3908 * this should improve performance for small packets with large amounts
3909 * of reassembly being done in the stack
3910 */
3911static void e1000_check_copybreak(struct net_device *netdev,
3912 struct e1000_buffer *buffer_info,
3913 u32 length, struct sk_buff **skb)
3914{
3915 struct sk_buff *new_skb;
3916
3917 if (length > copybreak)
3918 return;
3919
3920 new_skb = netdev_alloc_skb_ip_align(netdev, length);
3921 if (!new_skb)
3922 return;
3923
3924 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
3925 (*skb)->data - NET_IP_ALIGN,
3926 length + NET_IP_ALIGN);
3927 /* save the skb in buffer_info as good */
3928 buffer_info->skb = *skb;
3929 *skb = new_skb;
3930}
3931
1da177e4 3932/**
2d7edb92 3933 * e1000_clean_rx_irq - Send received data up the network stack; legacy
1da177e4 3934 * @adapter: board private structure
edbbb3ca
JB
3935 * @rx_ring: ring to clean
3936 * @work_done: amount of napi work completed this call
3937 * @work_to_do: max amount of work allowed for this call to do
3938 */
64798845
JP
3939static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3940 struct e1000_rx_ring *rx_ring,
3941 int *work_done, int work_to_do)
1da177e4 3942{
1dc32918 3943 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
3944 struct net_device *netdev = adapter->netdev;
3945 struct pci_dev *pdev = adapter->pdev;
86c3d59f
JB
3946 struct e1000_rx_desc *rx_desc, *next_rxd;
3947 struct e1000_buffer *buffer_info, *next_buffer;
1da177e4 3948 unsigned long flags;
406874a7 3949 u32 length;
1da177e4 3950 unsigned int i;
72d64a43 3951 int cleaned_count = 0;
c3033b01 3952 bool cleaned = false;
835bb129 3953 unsigned int total_rx_bytes=0, total_rx_packets=0;
1da177e4
LT
3954
3955 i = rx_ring->next_to_clean;
3956 rx_desc = E1000_RX_DESC(*rx_ring, i);
b92ff8ee 3957 buffer_info = &rx_ring->buffer_info[i];
1da177e4 3958
b92ff8ee 3959 while (rx_desc->status & E1000_RXD_STAT_DD) {
24f476ee 3960 struct sk_buff *skb;
a292ca6e 3961 u8 status;
90fb5135 3962
96838a40 3963 if (*work_done >= work_to_do)
1da177e4
LT
3964 break;
3965 (*work_done)++;
2d0bb1c1 3966 rmb(); /* read descriptor and rx_buffer_info after status DD */
c3570acb 3967
a292ca6e 3968 status = rx_desc->status;
b92ff8ee 3969 skb = buffer_info->skb;
86c3d59f
JB
3970 buffer_info->skb = NULL;
3971
30320be8
JK
3972 prefetch(skb->data - NET_IP_ALIGN);
3973
86c3d59f
JB
3974 if (++i == rx_ring->count) i = 0;
3975 next_rxd = E1000_RX_DESC(*rx_ring, i);
30320be8
JK
3976 prefetch(next_rxd);
3977
86c3d59f 3978 next_buffer = &rx_ring->buffer_info[i];
86c3d59f 3979
c3033b01 3980 cleaned = true;
72d64a43 3981 cleaned_count++;
b16f53be
NN
3982 dma_unmap_single(&pdev->dev, buffer_info->dma,
3983 buffer_info->length, DMA_FROM_DEVICE);
679be3ba 3984 buffer_info->dma = 0;
1da177e4 3985
1da177e4 3986 length = le16_to_cpu(rx_desc->length);
ea30e119 3987 /* !EOP means multiple descriptors were used to store a single
40a14dea
JB
 3988 * packet, if that's the case we need to toss it. In fact, we
 3989 * need to toss every packet with the EOP bit clear and the next
3990 * frame that _does_ have the EOP bit set, as it is by
3991 * definition only a frame fragment
3992 */
3993 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
3994 adapter->discarding = true;
3995
3996 if (adapter->discarding) {
a1415ee6 3997 /* All receives must fit into a single buffer */
feb8f478 3998 e_dbg("Receive packet consumed multiple buffers\n");
864c4e45 3999 /* recycle */
8fc897b0 4000 buffer_info->skb = skb;
40a14dea
JB
4001 if (status & E1000_RXD_STAT_EOP)
4002 adapter->discarding = false;
1da177e4
LT
4003 goto next_desc;
4004 }
4005
96838a40 4006 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
edbbb3ca 4007 u8 last_byte = *(skb->data + length - 1);
1dc32918
JP
4008 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4009 last_byte)) {
1da177e4 4010 spin_lock_irqsave(&adapter->stats_lock, flags);
1dc32918 4011 e1000_tbi_adjust_stats(hw, &adapter->stats,
1da177e4
LT
4012 length, skb->data);
4013 spin_unlock_irqrestore(&adapter->stats_lock,
4014 flags);
4015 length--;
4016 } else {
9e2feace
AK
4017 /* recycle */
4018 buffer_info->skb = skb;
1da177e4
LT
4019 goto next_desc;
4020 }
1cb5821f 4021 }
1da177e4 4022
d2a1e213
JB
4023 /* adjust length to remove Ethernet CRC, this must be
4024 * done after the TBI_ACCEPT workaround above */
4025 length -= 4;
4026
835bb129
JB
4027 /* probably a little skewed due to removing CRC */
4028 total_rx_bytes += length;
4029 total_rx_packets++;
4030
57bf6eef
JP
4031 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4032
996695de 4033 skb_put(skb, length);
1da177e4
LT
4034
4035 /* Receive Checksum Offload */
a292ca6e 4036 e1000_rx_checksum(adapter,
406874a7
JP
4037 (u32)(status) |
4038 ((u32)(rx_desc->errors) << 24),
c3d7a3a4 4039 le16_to_cpu(rx_desc->csum), skb);
96838a40 4040
edbbb3ca 4041 e1000_receive_skb(adapter, status, rx_desc->special, skb);
c3570acb 4042
1da177e4
LT
4043next_desc:
4044 rx_desc->status = 0;
1da177e4 4045
72d64a43
JK
4046 /* return some buffers to hardware, one at a time is too slow */
4047 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4048 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4049 cleaned_count = 0;
4050 }
4051
30320be8 4052 /* use prefetched values */
86c3d59f
JB
4053 rx_desc = next_rxd;
4054 buffer_info = next_buffer;
1da177e4 4055 }
1da177e4 4056 rx_ring->next_to_clean = i;
72d64a43
JK
4057
4058 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4059 if (cleaned_count)
4060 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
2d7edb92 4061
835bb129
JB
4062 adapter->total_rx_packets += total_rx_packets;
4063 adapter->total_rx_bytes += total_rx_bytes;
5fe31def
AK
4064 netdev->stats.rx_bytes += total_rx_bytes;
4065 netdev->stats.rx_packets += total_rx_packets;
2d7edb92
MC
4066 return cleaned;
4067}
4068
edbbb3ca
JB
4069/**
4070 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4071 * @adapter: address of board private structure
4072 * @rx_ring: pointer to receive ring structure
4073 * @cleaned_count: number of buffers to allocate this pass
4074 **/
4075
4076static void
4077e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4078 struct e1000_rx_ring *rx_ring, int cleaned_count)
4079{
4080 struct net_device *netdev = adapter->netdev;
4081 struct pci_dev *pdev = adapter->pdev;
4082 struct e1000_rx_desc *rx_desc;
4083 struct e1000_buffer *buffer_info;
4084 struct sk_buff *skb;
4085 unsigned int i;
89d71a66 4086 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
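 /* The jumbo path stores packet data in page fragments, so the skb
 * itself only needs a small linear area (enough for the ETH_HLEN pull
 * in e1000_clean_jumbo_rx_irq()). */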
edbbb3ca
JB
4087
4088 i = rx_ring->next_to_use;
4089 buffer_info = &rx_ring->buffer_info[i];
4090
4091 while (cleaned_count--) {
4092 skb = buffer_info->skb;
4093 if (skb) {
4094 skb_trim(skb, 0);
4095 goto check_page;
4096 }
4097
89d71a66 4098 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
edbbb3ca
JB
4099 if (unlikely(!skb)) {
4100 /* Better luck next round */
4101 adapter->alloc_rx_buff_failed++;
4102 break;
4103 }
4104
4105 /* Fix for errata 23, can't cross 64kB boundary */
4106 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4107 struct sk_buff *oldskb = skb;
feb8f478
ET
4108 e_err(rx_err, "skb align check failed: %u bytes at "
4109 "%p\n", bufsz, skb->data);
edbbb3ca 4110 /* Try again, without freeing the previous */
89d71a66 4111 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
edbbb3ca
JB
4112 /* Failed allocation, critical failure */
4113 if (!skb) {
4114 dev_kfree_skb(oldskb);
4115 adapter->alloc_rx_buff_failed++;
4116 break;
4117 }
4118
4119 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4120 /* give up */
4121 dev_kfree_skb(skb);
4122 dev_kfree_skb(oldskb);
4123 break; /* while (cleaned_count--) */
4124 }
4125
4126 /* Use new allocation */
4127 dev_kfree_skb(oldskb);
4128 }
edbbb3ca
JB
4129 buffer_info->skb = skb;
4130 buffer_info->length = adapter->rx_buffer_len;
4131check_page:
4132 /* allocate a new page if necessary */
4133 if (!buffer_info->page) {
4134 buffer_info->page = alloc_page(GFP_ATOMIC);
4135 if (unlikely(!buffer_info->page)) {
4136 adapter->alloc_rx_buff_failed++;
4137 break;
4138 }
4139 }
4140
b5abb028 4141 if (!buffer_info->dma) {
b16f53be 4142 buffer_info->dma = dma_map_page(&pdev->dev,
edbbb3ca 4143 buffer_info->page, 0,
b16f53be
NN
4144 buffer_info->length,
4145 DMA_FROM_DEVICE);
4146 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
b5abb028
AB
4147 put_page(buffer_info->page);
4148 dev_kfree_skb(skb);
4149 buffer_info->page = NULL;
4150 buffer_info->skb = NULL;
4151 buffer_info->dma = 0;
4152 adapter->alloc_rx_buff_failed++;
4153 break; /* while !buffer_info->skb */
4154 }
4155 }
edbbb3ca
JB
4156
4157 rx_desc = E1000_RX_DESC(*rx_ring, i);
4158 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4159
4160 if (unlikely(++i == rx_ring->count))
4161 i = 0;
4162 buffer_info = &rx_ring->buffer_info[i];
4163 }
4164
4165 if (likely(rx_ring->next_to_use != i)) {
4166 rx_ring->next_to_use = i;
4167 if (unlikely(i-- == 0))
4168 i = (rx_ring->count - 1);
4169
4170 /* Force memory writes to complete before letting h/w
4171 * know there are new descriptors to fetch. (Only
4172 * applicable for weak-ordered memory model archs,
4173 * such as IA-64). */
4174 wmb();
4175 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4176 }
4177}
4178
1da177e4 4179/**
2d7edb92 4180 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1da177e4
LT
4181 * @adapter: address of board private structure
4182 **/
4183
64798845
JP
4184static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4185 struct e1000_rx_ring *rx_ring,
4186 int cleaned_count)
1da177e4 4187{
1dc32918 4188 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
4189 struct net_device *netdev = adapter->netdev;
4190 struct pci_dev *pdev = adapter->pdev;
4191 struct e1000_rx_desc *rx_desc;
4192 struct e1000_buffer *buffer_info;
4193 struct sk_buff *skb;
2648345f 4194 unsigned int i;
89d71a66 4195 unsigned int bufsz = adapter->rx_buffer_len;
1da177e4
LT
4196
4197 i = rx_ring->next_to_use;
4198 buffer_info = &rx_ring->buffer_info[i];
4199
a292ca6e 4200 while (cleaned_count--) {
ca6f7224
CH
4201 skb = buffer_info->skb;
4202 if (skb) {
a292ca6e
JK
4203 skb_trim(skb, 0);
4204 goto map_skb;
4205 }
4206
89d71a66 4207 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
96838a40 4208 if (unlikely(!skb)) {
1da177e4 4209 /* Better luck next round */
72d64a43 4210 adapter->alloc_rx_buff_failed++;
1da177e4
LT
4211 break;
4212 }
4213
2648345f 4214 /* Fix for errata 23, can't cross 64kB boundary */
1da177e4
LT
4215 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4216 struct sk_buff *oldskb = skb;
feb8f478
ET
4217 e_err(rx_err, "skb align check failed: %u bytes at "
4218 "%p\n", bufsz, skb->data);
2648345f 4219 /* Try again, without freeing the previous */
89d71a66 4220 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
2648345f 4221 /* Failed allocation, critical failure */
1da177e4
LT
4222 if (!skb) {
4223 dev_kfree_skb(oldskb);
edbbb3ca 4224 adapter->alloc_rx_buff_failed++;
1da177e4
LT
4225 break;
4226 }
2648345f 4227
1da177e4
LT
4228 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4229 /* give up */
4230 dev_kfree_skb(skb);
4231 dev_kfree_skb(oldskb);
edbbb3ca 4232 adapter->alloc_rx_buff_failed++;
1da177e4 4233 break; /* while !buffer_info->skb */
1da177e4 4234 }
ca6f7224
CH
4235
4236 /* Use new allocation */
4237 dev_kfree_skb(oldskb);
1da177e4 4238 }
1da177e4
LT
4239 buffer_info->skb = skb;
4240 buffer_info->length = adapter->rx_buffer_len;
a292ca6e 4241map_skb:
b16f53be 4242 buffer_info->dma = dma_map_single(&pdev->dev,
1da177e4 4243 skb->data,
edbbb3ca 4244 buffer_info->length,
b16f53be
NN
4245 DMA_FROM_DEVICE);
4246 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
b5abb028
AB
4247 dev_kfree_skb(skb);
4248 buffer_info->skb = NULL;
4249 buffer_info->dma = 0;
4250 adapter->alloc_rx_buff_failed++;
4251 break; /* while !buffer_info->skb */
4252 }
1da177e4 4253
edbbb3ca
JB
4254 /*
4255 * XXX if it was allocated cleanly it will never map to a
4256 * boundary crossing
4257 */
4258
2648345f
MC
4259 /* Fix for errata 23, can't cross 64kB boundary */
4260 if (!e1000_check_64k_bound(adapter,
4261 (void *)(unsigned long)buffer_info->dma,
4262 adapter->rx_buffer_len)) {
feb8f478
ET
4263 e_err(rx_err, "dma align check failed: %u bytes at "
4264 "%p\n", adapter->rx_buffer_len,
675ad473 4265 (void *)(unsigned long)buffer_info->dma);
1da177e4
LT
4266 dev_kfree_skb(skb);
4267 buffer_info->skb = NULL;
4268
b16f53be 4269 dma_unmap_single(&pdev->dev, buffer_info->dma,
1da177e4 4270 adapter->rx_buffer_len,
b16f53be 4271 DMA_FROM_DEVICE);
679be3ba 4272 buffer_info->dma = 0;
1da177e4 4273
edbbb3ca 4274 adapter->alloc_rx_buff_failed++;
1da177e4
LT
4275 break; /* while !buffer_info->skb */
4276 }
1da177e4
LT
4277 rx_desc = E1000_RX_DESC(*rx_ring, i);
4278 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4279
96838a40
JB
4280 if (unlikely(++i == rx_ring->count))
4281 i = 0;
1da177e4
LT
4282 buffer_info = &rx_ring->buffer_info[i];
4283 }
4284
b92ff8ee
JB
4285 if (likely(rx_ring->next_to_use != i)) {
4286 rx_ring->next_to_use = i;
4287 if (unlikely(i-- == 0))
4288 i = (rx_ring->count - 1);
4289
4290 /* Force memory writes to complete before letting h/w
4291 * know there are new descriptors to fetch. (Only
4292 * applicable for weak-ordered memory model archs,
4293 * such as IA-64). */
4294 wmb();
1dc32918 4295 writel(i, hw->hw_addr + rx_ring->rdt);
b92ff8ee 4296 }
1da177e4
LT
4297}
4298
4299/**
4300 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 4301 * @adapter: board private structure
4302 **/
4303
64798845 4304static void e1000_smartspeed(struct e1000_adapter *adapter)
1da177e4 4305{
1dc32918 4306 struct e1000_hw *hw = &adapter->hw;
406874a7
JP
4307 u16 phy_status;
4308 u16 phy_ctrl;
1da177e4 4309
1dc32918
JP
4310 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4311 !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
1da177e4
LT
4312 return;
4313
96838a40 4314 if (adapter->smartspeed == 0) {
1da177e4
LT
4315 /* If Master/Slave config fault is asserted twice,
 4316 * we assume back-to-back faults */
1dc32918 4317 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
96838a40 4318 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
1dc32918 4319 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
96838a40 4320 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
1dc32918 4321 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
96838a40 4322 if (phy_ctrl & CR_1000T_MS_ENABLE) {
1da177e4 4323 phy_ctrl &= ~CR_1000T_MS_ENABLE;
1dc32918 4324 e1000_write_phy_reg(hw, PHY_1000T_CTRL,
1da177e4
LT
4325 phy_ctrl);
4326 adapter->smartspeed++;
1dc32918
JP
4327 if (!e1000_phy_setup_autoneg(hw) &&
4328 !e1000_read_phy_reg(hw, PHY_CTRL,
1da177e4
LT
4329 &phy_ctrl)) {
4330 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4331 MII_CR_RESTART_AUTO_NEG);
1dc32918 4332 e1000_write_phy_reg(hw, PHY_CTRL,
1da177e4
LT
4333 phy_ctrl);
4334 }
4335 }
4336 return;
96838a40 4337 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
1da177e4 4338 /* If still no link, perhaps using 2/3 pair cable */
1dc32918 4339 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
1da177e4 4340 phy_ctrl |= CR_1000T_MS_ENABLE;
1dc32918
JP
4341 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4342 if (!e1000_phy_setup_autoneg(hw) &&
4343 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
1da177e4
LT
4344 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4345 MII_CR_RESTART_AUTO_NEG);
1dc32918 4346 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
1da177e4
LT
4347 }
4348 }
4349 /* Restart process after E1000_SMARTSPEED_MAX iterations */
96838a40 4350 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
1da177e4
LT
4351 adapter->smartspeed = 0;
4352}
4353
4354/**
 4355 * e1000_ioctl - handle device ioctl requests
 4356 * @netdev: network interface device structure
 4357 * @ifreq: pointer to the interface request structure
 4358 * @cmd: ioctl command to execute
4359 **/
4360
64798845 4361static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1da177e4
LT
4362{
4363 switch (cmd) {
4364 case SIOCGMIIPHY:
4365 case SIOCGMIIREG:
4366 case SIOCSMIIREG:
4367 return e1000_mii_ioctl(netdev, ifr, cmd);
4368 default:
4369 return -EOPNOTSUPP;
4370 }
4371}
4372
4373/**
 4374 * e1000_mii_ioctl - handle MII ioctl requests
 4375 * @netdev: network interface device structure
 4376 * @ifreq: pointer to the interface request structure
 4377 * @cmd: ioctl command to execute
4378 **/
4379
64798845
JP
4380static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4381 int cmd)
1da177e4 4382{
60490fe0 4383 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 4384 struct e1000_hw *hw = &adapter->hw;
1da177e4
LT
4385 struct mii_ioctl_data *data = if_mii(ifr);
4386 int retval;
406874a7
JP
4387 u16 mii_reg;
4388 u16 spddplx;
97876fc6 4389 unsigned long flags;
1da177e4 4390
1dc32918 4391 if (hw->media_type != e1000_media_type_copper)
1da177e4
LT
4392 return -EOPNOTSUPP;
4393
4394 switch (cmd) {
4395 case SIOCGMIIPHY:
1dc32918 4396 data->phy_id = hw->phy_addr;
1da177e4
LT
4397 break;
4398 case SIOCGMIIREG:
97876fc6 4399 spin_lock_irqsave(&adapter->stats_lock, flags);
1dc32918 4400 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
97876fc6
MC
4401 &data->val_out)) {
4402 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1da177e4 4403 return -EIO;
97876fc6
MC
4404 }
4405 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1da177e4
LT
4406 break;
4407 case SIOCSMIIREG:
96838a40 4408 if (data->reg_num & ~(0x1F))
1da177e4
LT
4409 return -EFAULT;
4410 mii_reg = data->val_in;
97876fc6 4411 spin_lock_irqsave(&adapter->stats_lock, flags);
1dc32918 4412 if (e1000_write_phy_reg(hw, data->reg_num,
97876fc6
MC
4413 mii_reg)) {
4414 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1da177e4 4415 return -EIO;
97876fc6 4416 }
f0163ac4 4417 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1dc32918 4418 if (hw->media_type == e1000_media_type_copper) {
1da177e4
LT
4419 switch (data->reg_num) {
4420 case PHY_CTRL:
96838a40 4421 if (mii_reg & MII_CR_POWER_DOWN)
1da177e4 4422 break;
96838a40 4423 if (mii_reg & MII_CR_AUTO_NEG_EN) {
1dc32918
JP
4424 hw->autoneg = 1;
4425 hw->autoneg_advertised = 0x2F;
1da177e4
LT
4426 } else {
4427 if (mii_reg & 0x40)
4428 spddplx = SPEED_1000;
4429 else if (mii_reg & 0x2000)
4430 spddplx = SPEED_100;
4431 else
4432 spddplx = SPEED_10;
4433 spddplx += (mii_reg & 0x100)
cb764326
JK
4434 ? DUPLEX_FULL :
4435 DUPLEX_HALF;
1da177e4
LT
4436 retval = e1000_set_spd_dplx(adapter,
4437 spddplx);
f0163ac4 4438 if (retval)
1da177e4
LT
4439 return retval;
4440 }
2db10a08
AK
4441 if (netif_running(adapter->netdev))
4442 e1000_reinit_locked(adapter);
4443 else
1da177e4
LT
4444 e1000_reset(adapter);
4445 break;
4446 case M88E1000_PHY_SPEC_CTRL:
4447 case M88E1000_EXT_PHY_SPEC_CTRL:
1dc32918 4448 if (e1000_phy_reset(hw))
1da177e4
LT
4449 return -EIO;
4450 break;
4451 }
4452 } else {
4453 switch (data->reg_num) {
4454 case PHY_CTRL:
96838a40 4455 if (mii_reg & MII_CR_POWER_DOWN)
1da177e4 4456 break;
2db10a08
AK
4457 if (netif_running(adapter->netdev))
4458 e1000_reinit_locked(adapter);
4459 else
1da177e4
LT
4460 e1000_reset(adapter);
4461 break;
4462 }
4463 }
4464 break;
4465 default:
4466 return -EOPNOTSUPP;
4467 }
4468 return E1000_SUCCESS;
4469}
4470
64798845 4471void e1000_pci_set_mwi(struct e1000_hw *hw)
1da177e4
LT
4472{
4473 struct e1000_adapter *adapter = hw->back;
2648345f 4474 int ret_val = pci_set_mwi(adapter->pdev);
1da177e4 4475
96838a40 4476 if (ret_val)
feb8f478 4477 e_err(probe, "Error in setting MWI\n");
1da177e4
LT
4478}
4479
64798845 4480void e1000_pci_clear_mwi(struct e1000_hw *hw)
1da177e4
LT
4481{
4482 struct e1000_adapter *adapter = hw->back;
4483
4484 pci_clear_mwi(adapter->pdev);
4485}
4486
64798845 4487int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
007755eb
PO
4488{
4489 struct e1000_adapter *adapter = hw->back;
4490 return pcix_get_mmrbc(adapter->pdev);
4491}
4492
64798845 4493void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
007755eb
PO
4494{
4495 struct e1000_adapter *adapter = hw->back;
4496 pcix_set_mmrbc(adapter->pdev, mmrbc);
4497}
4498
64798845 4499void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
1da177e4
LT
4500{
4501 outl(value, port);
4502}
4503
64798845
JP
4504static void e1000_vlan_rx_register(struct net_device *netdev,
4505 struct vlan_group *grp)
1da177e4 4506{
60490fe0 4507 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 4508 struct e1000_hw *hw = &adapter->hw;
406874a7 4509 u32 ctrl, rctl;
1da177e4 4510
9150b76a
JB
4511 if (!test_bit(__E1000_DOWN, &adapter->flags))
4512 e1000_irq_disable(adapter);
1da177e4
LT
4513 adapter->vlgrp = grp;
4514
96838a40 4515 if (grp) {
1da177e4 4516 /* enable VLAN tag insert/strip */
1dc32918 4517 ctrl = er32(CTRL);
1da177e4 4518 ctrl |= E1000_CTRL_VME;
1dc32918 4519 ew32(CTRL, ctrl);
1da177e4 4520
1532ecea
JB
4521 /* enable VLAN receive filtering */
4522 rctl = er32(RCTL);
4523 rctl &= ~E1000_RCTL_CFIEN;
4524 if (!(netdev->flags & IFF_PROMISC))
4525 rctl |= E1000_RCTL_VFE;
4526 ew32(RCTL, rctl);
4527 e1000_update_mng_vlan(adapter);
1da177e4
LT
4528 } else {
4529 /* disable VLAN tag insert/strip */
1dc32918 4530 ctrl = er32(CTRL);
1da177e4 4531 ctrl &= ~E1000_CTRL_VME;
1dc32918 4532 ew32(CTRL, ctrl);
1da177e4 4533
1532ecea
JB
4534 /* disable VLAN receive filtering */
4535 rctl = er32(RCTL);
4536 rctl &= ~E1000_RCTL_VFE;
4537 ew32(RCTL, rctl);
fd38d7a0 4538
1532ecea 4539 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
120a5d0d 4540 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1532ecea 4541 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
cd94dd0b 4542 }
1da177e4
LT
4543 }
4544
9150b76a
JB
4545 if (!test_bit(__E1000_DOWN, &adapter->flags))
4546 e1000_irq_enable(adapter);
1da177e4
LT
4547}
4548
64798845 4549static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1da177e4 4550{
60490fe0 4551 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 4552 struct e1000_hw *hw = &adapter->hw;
406874a7 4553 u32 vfta, index;
96838a40 4554
1dc32918 4555 if ((hw->mng_cookie.status &
96838a40
JB
4556 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4557 (vid == adapter->mng_vlan_id))
2d7edb92 4558 return;
1da177e4
LT
4559 /* add VID to filter table */
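 /* The VLAN filter table is an array of 128 32-bit registers: VID bits
 * 11:5 select the register and bits 4:0 select the bit within it. */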
4560 index = (vid >> 5) & 0x7F;
1dc32918 4561 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
1da177e4 4562 vfta |= (1 << (vid & 0x1F));
1dc32918 4563 e1000_write_vfta(hw, index, vfta);
1da177e4
LT
4564}
4565
64798845 4566static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1da177e4 4567{
60490fe0 4568 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 4569 struct e1000_hw *hw = &adapter->hw;
406874a7 4570 u32 vfta, index;
1da177e4 4571
9150b76a
JB
4572 if (!test_bit(__E1000_DOWN, &adapter->flags))
4573 e1000_irq_disable(adapter);
5c15bdec 4574 vlan_group_set_device(adapter->vlgrp, vid, NULL);
9150b76a
JB
4575 if (!test_bit(__E1000_DOWN, &adapter->flags))
4576 e1000_irq_enable(adapter);
1da177e4
LT
4577
4578 /* remove VID from filter table */
4579 index = (vid >> 5) & 0x7F;
1dc32918 4580 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
1da177e4 4581 vfta &= ~(1 << (vid & 0x1F));
1dc32918 4582 e1000_write_vfta(hw, index, vfta);
1da177e4
LT
4583}
4584
64798845 4585static void e1000_restore_vlan(struct e1000_adapter *adapter)
1da177e4
LT
4586{
4587 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4588
96838a40 4589 if (adapter->vlgrp) {
406874a7 4590 u16 vid;
b738127d 4591 for (vid = 0; vid < VLAN_N_VID; vid++) {
5c15bdec 4592 if (!vlan_group_get_device(adapter->vlgrp, vid))
1da177e4
LT
4593 continue;
4594 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4595 }
4596 }
4597}
4598
64798845 4599int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
1da177e4 4600{
1dc32918
JP
4601 struct e1000_hw *hw = &adapter->hw;
4602
4603 hw->autoneg = 0;
1da177e4 4604
6921368f 4605 /* Fiber NICs only allow 1000 Mbps full duplex */
1dc32918 4606 if ((hw->media_type == e1000_media_type_fiber) &&
6921368f 4607 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
feb8f478 4608 e_err(probe, "Unsupported Speed/Duplex configuration\n");
6921368f
MC
4609 return -EINVAL;
4610 }
4611
96838a40 4612 switch (spddplx) {
1da177e4 4613 case SPEED_10 + DUPLEX_HALF:
1dc32918 4614 hw->forced_speed_duplex = e1000_10_half;
1da177e4
LT
4615 break;
4616 case SPEED_10 + DUPLEX_FULL:
1dc32918 4617 hw->forced_speed_duplex = e1000_10_full;
1da177e4
LT
4618 break;
4619 case SPEED_100 + DUPLEX_HALF:
1dc32918 4620 hw->forced_speed_duplex = e1000_100_half;
1da177e4
LT
4621 break;
4622 case SPEED_100 + DUPLEX_FULL:
1dc32918 4623 hw->forced_speed_duplex = e1000_100_full;
1da177e4
LT
4624 break;
4625 case SPEED_1000 + DUPLEX_FULL:
1dc32918
JP
4626 hw->autoneg = 1;
4627 hw->autoneg_advertised = ADVERTISE_1000_FULL;
1da177e4
LT
4628 break;
4629 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4630 default:
feb8f478 4631 e_err(probe, "Unsupported Speed/Duplex configuration\n");
1da177e4
LT
4632 return -EINVAL;
4633 }
4634 return 0;
4635}
4636
b43fcd7d 4637static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
1da177e4
LT
4638{
4639 struct net_device *netdev = pci_get_drvdata(pdev);
60490fe0 4640 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 4641 struct e1000_hw *hw = &adapter->hw;
406874a7
JP
4642 u32 ctrl, ctrl_ext, rctl, status;
4643 u32 wufc = adapter->wol;
6fdfef16 4644#ifdef CONFIG_PM
240b1710 4645 int retval = 0;
6fdfef16 4646#endif
1da177e4
LT
4647
4648 netif_device_detach(netdev);
4649
2db10a08
AK
4650 if (netif_running(netdev)) {
4651 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1da177e4 4652 e1000_down(adapter);
2db10a08 4653 }
1da177e4 4654
2f82665f 4655#ifdef CONFIG_PM
1d33e9c6 4656 retval = pci_save_state(pdev);
2f82665f
JB
4657 if (retval)
4658 return retval;
4659#endif
4660
1dc32918 4661 status = er32(STATUS);
96838a40 4662 if (status & E1000_STATUS_LU)
1da177e4
LT
4663 wufc &= ~E1000_WUFC_LNKC;
4664
96838a40 4665 if (wufc) {
1da177e4 4666 e1000_setup_rctl(adapter);
db0ce50d 4667 e1000_set_rx_mode(netdev);
1da177e4
LT
4668
4669 /* turn on all-multi mode if wake on multicast is enabled */
120cd576 4670 if (wufc & E1000_WUFC_MC) {
1dc32918 4671 rctl = er32(RCTL);
1da177e4 4672 rctl |= E1000_RCTL_MPE;
1dc32918 4673 ew32(RCTL, rctl);
1da177e4
LT
4674 }
4675
1dc32918
JP
4676 if (hw->mac_type >= e1000_82540) {
4677 ctrl = er32(CTRL);
1da177e4
LT
4678 /* advertise wake from D3Cold */
4679 #define E1000_CTRL_ADVD3WUC 0x00100000
4680 /* phy power management enable */
4681 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4682 ctrl |= E1000_CTRL_ADVD3WUC |
4683 E1000_CTRL_EN_PHY_PWR_MGMT;
1dc32918 4684 ew32(CTRL, ctrl);
1da177e4
LT
4685 }
4686
1dc32918 4687 if (hw->media_type == e1000_media_type_fiber ||
1532ecea 4688 hw->media_type == e1000_media_type_internal_serdes) {
1da177e4 4689 /* keep the laser running in D3 */
1dc32918 4690 ctrl_ext = er32(CTRL_EXT);
1da177e4 4691 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
1dc32918 4692 ew32(CTRL_EXT, ctrl_ext);
1da177e4
LT
4693 }
4694
1dc32918
JP
4695 ew32(WUC, E1000_WUC_PME_EN);
4696 ew32(WUFC, wufc);
1da177e4 4697 } else {
1dc32918
JP
4698 ew32(WUC, 0);
4699 ew32(WUFC, 0);
1da177e4
LT
4700 }
4701
0fccd0e9
JG
4702 e1000_release_manageability(adapter);
4703
b43fcd7d
RW
4704 *enable_wake = !!wufc;
4705
0fccd0e9 4706 /* make sure adapter isn't asleep if manageability is enabled */
b43fcd7d
RW
4707 if (adapter->en_mng_pt)
4708 *enable_wake = true;
1da177e4 4709
edd106fc
AK
4710 if (netif_running(netdev))
4711 e1000_free_irq(adapter);
4712
1da177e4 4713 pci_disable_device(pdev);
240b1710 4714
1da177e4
LT
4715 return 0;
4716}
4717
2f82665f 4718#ifdef CONFIG_PM
b43fcd7d
RW
4719static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4720{
4721 int retval;
4722 bool wake;
4723
4724 retval = __e1000_shutdown(pdev, &wake);
4725 if (retval)
4726 return retval;
4727
4728 if (wake) {
4729 pci_prepare_to_sleep(pdev);
4730 } else {
4731 pci_wake_from_d3(pdev, false);
4732 pci_set_power_state(pdev, PCI_D3hot);
4733 }
4734
4735 return 0;
4736}
4737
64798845 4738static int e1000_resume(struct pci_dev *pdev)
1da177e4
LT
4739{
4740 struct net_device *netdev = pci_get_drvdata(pdev);
60490fe0 4741 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 4742 struct e1000_hw *hw = &adapter->hw;
406874a7 4743 u32 err;
1da177e4 4744
d0e027db 4745 pci_set_power_state(pdev, PCI_D0);
1d33e9c6 4746 pci_restore_state(pdev);
dbb5aaeb 4747 pci_save_state(pdev);
81250297
TI
4748
4749 if (adapter->need_ioport)
4750 err = pci_enable_device(pdev);
4751 else
4752 err = pci_enable_device_mem(pdev);
c7be73bc 4753 if (err) {
675ad473 4754 pr_err("Cannot enable PCI device from suspend\n");
3d1dd8cb
AK
4755 return err;
4756 }
a4cb847d 4757 pci_set_master(pdev);
1da177e4 4758
d0e027db
AK
4759 pci_enable_wake(pdev, PCI_D3hot, 0);
4760 pci_enable_wake(pdev, PCI_D3cold, 0);
1da177e4 4761
c7be73bc
JP
4762 if (netif_running(netdev)) {
4763 err = e1000_request_irq(adapter);
4764 if (err)
4765 return err;
4766 }
edd106fc
AK
4767
4768 e1000_power_up_phy(adapter);
1da177e4 4769 e1000_reset(adapter);
1dc32918 4770 ew32(WUS, ~0);
1da177e4 4771
0fccd0e9
JG
4772 e1000_init_manageability(adapter);
4773
96838a40 4774 if (netif_running(netdev))
1da177e4
LT
4775 e1000_up(adapter);
4776
4777 netif_device_attach(netdev);
4778
1da177e4
LT
4779 return 0;
4780}
4781#endif
c653e635
AK
4782
4783static void e1000_shutdown(struct pci_dev *pdev)
4784{
b43fcd7d
RW
4785 bool wake;
4786
4787 __e1000_shutdown(pdev, &wake);
4788
4789 if (system_state == SYSTEM_POWER_OFF) {
4790 pci_wake_from_d3(pdev, wake);
4791 pci_set_power_state(pdev, PCI_D3hot);
4792 }
c653e635
AK
4793}
4794
1da177e4
LT
4795#ifdef CONFIG_NET_POLL_CONTROLLER
4796/*
4797 * Polling 'interrupt' - used by things like netconsole to send skbs
4798 * without having to re-enable interrupts. It's not called while
4799 * the interrupt routine is executing.
4800 */
64798845 4801static void e1000_netpoll(struct net_device *netdev)
1da177e4 4802{
60490fe0 4803 struct e1000_adapter *adapter = netdev_priv(netdev);
d3d9e484 4804
1da177e4 4805 disable_irq(adapter->pdev->irq);
7d12e780 4806 e1000_intr(adapter->pdev->irq, netdev);
1da177e4
LT
4807 enable_irq(adapter->pdev->irq);
4808}
4809#endif
4810
9026729b
AK
4811/**
4812 * e1000_io_error_detected - called when PCI error is detected
4813 * @pdev: Pointer to PCI device
120a5d0d 4814 * @state: The current pci connection state
9026729b
AK
4815 *
4816 * This function is called after a PCI bus error affecting
4817 * this device has been detected.
4818 */
64798845
JP
4819static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
4820 pci_channel_state_t state)
9026729b
AK
4821{
4822 struct net_device *netdev = pci_get_drvdata(pdev);
4cf1653a 4823 struct e1000_adapter *adapter = netdev_priv(netdev);
9026729b
AK
4824
4825 netif_device_detach(netdev);
4826
eab63302
AD
4827 if (state == pci_channel_io_perm_failure)
4828 return PCI_ERS_RESULT_DISCONNECT;
4829
9026729b
AK
4830 if (netif_running(netdev))
4831 e1000_down(adapter);
72e8d6bb 4832 pci_disable_device(pdev);
9026729b
AK
4833
 4834 /* Request a slot reset. */
4835 return PCI_ERS_RESULT_NEED_RESET;
4836}
4837
4838/**
4839 * e1000_io_slot_reset - called after the pci bus has been reset.
4840 * @pdev: Pointer to PCI device
4841 *
4842 * Restart the card from scratch, as if from a cold-boot. Implementation
4843 * resembles the first-half of the e1000_resume routine.
4844 */
4845static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4846{
4847 struct net_device *netdev = pci_get_drvdata(pdev);
4cf1653a 4848 struct e1000_adapter *adapter = netdev_priv(netdev);
1dc32918 4849 struct e1000_hw *hw = &adapter->hw;
81250297 4850 int err;
9026729b 4851
81250297
TI
4852 if (adapter->need_ioport)
4853 err = pci_enable_device(pdev);
4854 else
4855 err = pci_enable_device_mem(pdev);
4856 if (err) {
675ad473 4857 pr_err("Cannot re-enable PCI device after reset.\n");
9026729b
AK
4858 return PCI_ERS_RESULT_DISCONNECT;
4859 }
4860 pci_set_master(pdev);
4861
dbf38c94
LV
4862 pci_enable_wake(pdev, PCI_D3hot, 0);
4863 pci_enable_wake(pdev, PCI_D3cold, 0);
9026729b 4864
9026729b 4865 e1000_reset(adapter);
1dc32918 4866 ew32(WUS, ~0);
9026729b
AK
4867
4868 return PCI_ERS_RESULT_RECOVERED;
4869}
4870
4871/**
4872 * e1000_io_resume - called when traffic can start flowing again.
4873 * @pdev: Pointer to PCI device
4874 *
4875 * This callback is called when the error recovery driver tells us that
 4877 * it's OK to resume normal operation. Implementation resembles the
4877 * second-half of the e1000_resume routine.
4878 */
4879static void e1000_io_resume(struct pci_dev *pdev)
4880{
4881 struct net_device *netdev = pci_get_drvdata(pdev);
4cf1653a 4882 struct e1000_adapter *adapter = netdev_priv(netdev);
0fccd0e9
JG
4883
4884 e1000_init_manageability(adapter);
9026729b
AK
4885
4886 if (netif_running(netdev)) {
4887 if (e1000_up(adapter)) {
675ad473 4888 pr_info("can't bring device back up after reset\n");
9026729b
AK
4889 return;
4890 }
4891 }
4892
4893 netif_device_attach(netdev);
9026729b
AK
4894}
4895
1da177e4 4896/* e1000_main.c */