]>
Commit | Line | Data |
---|---|---|
1 | /******************************************************************************* | |
2 | ||
3 | Intel PRO/1000 Linux driver | |
4 | Copyright(c) 1999 - 2006 Intel Corporation. | |
5 | ||
6 | This program is free software; you can redistribute it and/or modify it | |
7 | under the terms and conditions of the GNU General Public License, | |
8 | version 2, as published by the Free Software Foundation. | |
9 | ||
10 | This program is distributed in the hope it will be useful, but WITHOUT | |
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | more details. | |
14 | ||
15 | You should have received a copy of the GNU General Public License along with | |
16 | this program; if not, write to the Free Software Foundation, Inc., | |
17 | 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
18 | ||
19 | The full GNU General Public License is included in this distribution in | |
20 | the file called "COPYING". | |
21 | ||
22 | Contact Information: | |
23 | Linux NICS <linux.nics@intel.com> | |
24 | e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> | |
25 | Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | |
26 | ||
27 | *******************************************************************************/ | |
28 | ||
29 | #include "e1000.h" | |
30 | #include <net/ip6_checksum.h> | |
31 | ||
32 | char e1000_driver_name[] = "e1000"; | |
33 | static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; | |
34 | #define DRV_VERSION "7.3.21-k3-NAPI" | |
35 | const char e1000_driver_version[] = DRV_VERSION; | |
36 | static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; | |
37 | ||
38 | /* e1000_pci_tbl - PCI Device ID Table | |
39 | * | |
40 | * Last entry must be all 0s | |
41 | * | |
42 | * Macro expands to... | |
43 | * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} | |
44 | */ | |
45 | static struct pci_device_id e1000_pci_tbl[] = { | |
46 | INTEL_E1000_ETHERNET_DEVICE(0x1000), | |
47 | INTEL_E1000_ETHERNET_DEVICE(0x1001), | |
48 | INTEL_E1000_ETHERNET_DEVICE(0x1004), | |
49 | INTEL_E1000_ETHERNET_DEVICE(0x1008), | |
50 | INTEL_E1000_ETHERNET_DEVICE(0x1009), | |
51 | INTEL_E1000_ETHERNET_DEVICE(0x100C), | |
52 | INTEL_E1000_ETHERNET_DEVICE(0x100D), | |
53 | INTEL_E1000_ETHERNET_DEVICE(0x100E), | |
54 | INTEL_E1000_ETHERNET_DEVICE(0x100F), | |
55 | INTEL_E1000_ETHERNET_DEVICE(0x1010), | |
56 | INTEL_E1000_ETHERNET_DEVICE(0x1011), | |
57 | INTEL_E1000_ETHERNET_DEVICE(0x1012), | |
58 | INTEL_E1000_ETHERNET_DEVICE(0x1013), | |
59 | INTEL_E1000_ETHERNET_DEVICE(0x1014), | |
60 | INTEL_E1000_ETHERNET_DEVICE(0x1015), | |
61 | INTEL_E1000_ETHERNET_DEVICE(0x1016), | |
62 | INTEL_E1000_ETHERNET_DEVICE(0x1017), | |
63 | INTEL_E1000_ETHERNET_DEVICE(0x1018), | |
64 | INTEL_E1000_ETHERNET_DEVICE(0x1019), | |
65 | INTEL_E1000_ETHERNET_DEVICE(0x101A), | |
66 | INTEL_E1000_ETHERNET_DEVICE(0x101D), | |
67 | INTEL_E1000_ETHERNET_DEVICE(0x101E), | |
68 | INTEL_E1000_ETHERNET_DEVICE(0x1026), | |
69 | INTEL_E1000_ETHERNET_DEVICE(0x1027), | |
70 | INTEL_E1000_ETHERNET_DEVICE(0x1028), | |
71 | INTEL_E1000_ETHERNET_DEVICE(0x1075), | |
72 | INTEL_E1000_ETHERNET_DEVICE(0x1076), | |
73 | INTEL_E1000_ETHERNET_DEVICE(0x1077), | |
74 | INTEL_E1000_ETHERNET_DEVICE(0x1078), | |
75 | INTEL_E1000_ETHERNET_DEVICE(0x1079), | |
76 | INTEL_E1000_ETHERNET_DEVICE(0x107A), | |
77 | INTEL_E1000_ETHERNET_DEVICE(0x107B), | |
78 | INTEL_E1000_ETHERNET_DEVICE(0x107C), | |
79 | INTEL_E1000_ETHERNET_DEVICE(0x108A), | |
80 | INTEL_E1000_ETHERNET_DEVICE(0x1099), | |
81 | INTEL_E1000_ETHERNET_DEVICE(0x10B5), | |
82 | /* required last entry */ | |
83 | {0,} | |
84 | }; | |
85 | ||
86 | MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); | |
87 | ||
88 | int e1000_up(struct e1000_adapter *adapter); | |
89 | void e1000_down(struct e1000_adapter *adapter); | |
90 | void e1000_reinit_locked(struct e1000_adapter *adapter); | |
91 | void e1000_reset(struct e1000_adapter *adapter); | |
92 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx); | |
93 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); | |
94 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); | |
95 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter); | |
96 | void e1000_free_all_rx_resources(struct e1000_adapter *adapter); | |
97 | static int e1000_setup_tx_resources(struct e1000_adapter *adapter, | |
98 | struct e1000_tx_ring *txdr); | |
99 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, | |
100 | struct e1000_rx_ring *rxdr); | |
101 | static void e1000_free_tx_resources(struct e1000_adapter *adapter, | |
102 | struct e1000_tx_ring *tx_ring); | |
103 | static void e1000_free_rx_resources(struct e1000_adapter *adapter, | |
104 | struct e1000_rx_ring *rx_ring); | |
105 | void e1000_update_stats(struct e1000_adapter *adapter); | |
106 | ||
107 | static int e1000_init_module(void); | |
108 | static void e1000_exit_module(void); | |
109 | static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); | |
110 | static void __devexit e1000_remove(struct pci_dev *pdev); | |
111 | static int e1000_alloc_queues(struct e1000_adapter *adapter); | |
112 | static int e1000_sw_init(struct e1000_adapter *adapter); | |
113 | static int e1000_open(struct net_device *netdev); | |
114 | static int e1000_close(struct net_device *netdev); | |
115 | static void e1000_configure_tx(struct e1000_adapter *adapter); | |
116 | static void e1000_configure_rx(struct e1000_adapter *adapter); | |
117 | static void e1000_setup_rctl(struct e1000_adapter *adapter); | |
118 | static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); | |
119 | static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); | |
120 | static void e1000_clean_tx_ring(struct e1000_adapter *adapter, | |
121 | struct e1000_tx_ring *tx_ring); | |
122 | static void e1000_clean_rx_ring(struct e1000_adapter *adapter, | |
123 | struct e1000_rx_ring *rx_ring); | |
124 | static void e1000_set_rx_mode(struct net_device *netdev); | |
125 | static void e1000_update_phy_info(unsigned long data); | |
126 | static void e1000_watchdog(unsigned long data); | |
127 | static void e1000_82547_tx_fifo_stall(unsigned long data); | |
128 | static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev); | |
129 | static struct net_device_stats * e1000_get_stats(struct net_device *netdev); | |
130 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu); | |
131 | static int e1000_set_mac(struct net_device *netdev, void *p); | |
132 | static irqreturn_t e1000_intr(int irq, void *data); | |
133 | static irqreturn_t e1000_intr_msi(int irq, void *data); | |
134 | static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | |
135 | struct e1000_tx_ring *tx_ring); | |
136 | static int e1000_clean(struct napi_struct *napi, int budget); | |
137 | static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |
138 | struct e1000_rx_ring *rx_ring, | |
139 | int *work_done, int work_to_do); | |
140 | static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |
141 | struct e1000_rx_ring *rx_ring, | |
142 | int cleaned_count); | |
143 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); | |
144 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | |
145 | int cmd); | |
146 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter); | |
147 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter); | |
148 | static void e1000_tx_timeout(struct net_device *dev); | |
149 | static void e1000_reset_task(struct work_struct *work); | |
150 | static void e1000_smartspeed(struct e1000_adapter *adapter); | |
151 | static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, | |
152 | struct sk_buff *skb); | |
153 | ||
154 | static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); | |
155 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); | |
156 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); | |
157 | static void e1000_restore_vlan(struct e1000_adapter *adapter); | |
158 | ||
159 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); | |
160 | #ifdef CONFIG_PM | |
161 | static int e1000_resume(struct pci_dev *pdev); | |
162 | #endif | |
163 | static void e1000_shutdown(struct pci_dev *pdev); | |
164 | ||
165 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
166 | /* for netdump / net console */ | |
167 | static void e1000_netpoll (struct net_device *netdev); | |
168 | #endif | |
169 | ||
170 | #define COPYBREAK_DEFAULT 256 | |
171 | static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; | |
172 | module_param(copybreak, uint, 0644); | |
173 | MODULE_PARM_DESC(copybreak, | |
174 | "Maximum size of packet that is copied to a new buffer on receive"); | |
175 | ||
176 | static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, | |
177 | pci_channel_state_t state); | |
178 | static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); | |
179 | static void e1000_io_resume(struct pci_dev *pdev); | |
180 | ||
181 | static struct pci_error_handlers e1000_err_handler = { | |
182 | .error_detected = e1000_io_error_detected, | |
183 | .slot_reset = e1000_io_slot_reset, | |
184 | .resume = e1000_io_resume, | |
185 | }; | |
186 | ||
187 | static struct pci_driver e1000_driver = { | |
188 | .name = e1000_driver_name, | |
189 | .id_table = e1000_pci_tbl, | |
190 | .probe = e1000_probe, | |
191 | .remove = __devexit_p(e1000_remove), | |
192 | #ifdef CONFIG_PM | |
193 | /* Power Managment Hooks */ | |
194 | .suspend = e1000_suspend, | |
195 | .resume = e1000_resume, | |
196 | #endif | |
197 | .shutdown = e1000_shutdown, | |
198 | .err_handler = &e1000_err_handler | |
199 | }; | |
200 | ||
201 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); | |
202 | MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); | |
203 | MODULE_LICENSE("GPL"); | |
204 | MODULE_VERSION(DRV_VERSION); | |
205 | ||
206 | static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE; | |
207 | module_param(debug, int, 0); | |
208 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | |
209 | ||
210 | /** | |
211 | * e1000_init_module - Driver Registration Routine | |
212 | * | |
213 | * e1000_init_module is the first routine called when the driver is | |
214 | * loaded. All it does is register with the PCI subsystem. | |
215 | **/ | |
216 | ||
217 | static int __init e1000_init_module(void) | |
218 | { | |
219 | int ret; | |
220 | printk(KERN_INFO "%s - version %s\n", | |
221 | e1000_driver_string, e1000_driver_version); | |
222 | ||
223 | printk(KERN_INFO "%s\n", e1000_copyright); | |
224 | ||
225 | ret = pci_register_driver(&e1000_driver); | |
226 | if (copybreak != COPYBREAK_DEFAULT) { | |
227 | if (copybreak == 0) | |
228 | printk(KERN_INFO "e1000: copybreak disabled\n"); | |
229 | else | |
230 | printk(KERN_INFO "e1000: copybreak enabled for " | |
231 | "packets <= %u bytes\n", copybreak); | |
232 | } | |
233 | return ret; | |
234 | } | |
235 | ||
236 | module_init(e1000_init_module); | |
237 | ||
238 | /** | |
239 | * e1000_exit_module - Driver Exit Cleanup Routine | |
240 | * | |
241 | * e1000_exit_module is called just before the driver is removed | |
242 | * from memory. | |
243 | **/ | |
244 | ||
245 | static void __exit e1000_exit_module(void) | |
246 | { | |
247 | pci_unregister_driver(&e1000_driver); | |
248 | } | |
249 | ||
250 | module_exit(e1000_exit_module); | |
251 | ||
252 | static int e1000_request_irq(struct e1000_adapter *adapter) | |
253 | { | |
254 | struct e1000_hw *hw = &adapter->hw; | |
255 | struct net_device *netdev = adapter->netdev; | |
256 | irq_handler_t handler = e1000_intr; | |
257 | int irq_flags = IRQF_SHARED; | |
258 | int err; | |
259 | ||
260 | if (hw->mac_type >= e1000_82571) { | |
261 | adapter->have_msi = !pci_enable_msi(adapter->pdev); | |
262 | if (adapter->have_msi) { | |
263 | handler = e1000_intr_msi; | |
264 | irq_flags = 0; | |
265 | } | |
266 | } | |
267 | ||
268 | err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, | |
269 | netdev); | |
270 | if (err) { | |
271 | if (adapter->have_msi) | |
272 | pci_disable_msi(adapter->pdev); | |
273 | DPRINTK(PROBE, ERR, | |
274 | "Unable to allocate interrupt Error: %d\n", err); | |
275 | } | |
276 | ||
277 | return err; | |
278 | } | |
279 | ||
280 | static void e1000_free_irq(struct e1000_adapter *adapter) | |
281 | { | |
282 | struct net_device *netdev = adapter->netdev; | |
283 | ||
284 | free_irq(adapter->pdev->irq, netdev); | |
285 | ||
286 | if (adapter->have_msi) | |
287 | pci_disable_msi(adapter->pdev); | |
288 | } | |
289 | ||
290 | /** | |
291 | * e1000_irq_disable - Mask off interrupt generation on the NIC | |
292 | * @adapter: board private structure | |
293 | **/ | |
294 | ||
295 | static void e1000_irq_disable(struct e1000_adapter *adapter) | |
296 | { | |
297 | struct e1000_hw *hw = &adapter->hw; | |
298 | ||
299 | ew32(IMC, ~0); | |
300 | E1000_WRITE_FLUSH(); | |
301 | synchronize_irq(adapter->pdev->irq); | |
302 | } | |
303 | ||
304 | /** | |
305 | * e1000_irq_enable - Enable default interrupt generation settings | |
306 | * @adapter: board private structure | |
307 | **/ | |
308 | ||
309 | static void e1000_irq_enable(struct e1000_adapter *adapter) | |
310 | { | |
311 | struct e1000_hw *hw = &adapter->hw; | |
312 | ||
313 | ew32(IMS, IMS_ENABLE_MASK); | |
314 | E1000_WRITE_FLUSH(); | |
315 | } | |
316 | ||
317 | static void e1000_update_mng_vlan(struct e1000_adapter *adapter) | |
318 | { | |
319 | struct e1000_hw *hw = &adapter->hw; | |
320 | struct net_device *netdev = adapter->netdev; | |
321 | u16 vid = hw->mng_cookie.vlan_id; | |
322 | u16 old_vid = adapter->mng_vlan_id; | |
323 | if (adapter->vlgrp) { | |
324 | if (!vlan_group_get_device(adapter->vlgrp, vid)) { | |
325 | if (hw->mng_cookie.status & | |
326 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { | |
327 | e1000_vlan_rx_add_vid(netdev, vid); | |
328 | adapter->mng_vlan_id = vid; | |
329 | } else | |
330 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | |
331 | ||
332 | if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && | |
333 | (vid != old_vid) && | |
334 | !vlan_group_get_device(adapter->vlgrp, old_vid)) | |
335 | e1000_vlan_rx_kill_vid(netdev, old_vid); | |
336 | } else | |
337 | adapter->mng_vlan_id = vid; | |
338 | } | |
339 | } | |
340 | ||
341 | /** | |
342 | * e1000_release_hw_control - release control of the h/w to f/w | |
343 | * @adapter: address of board private structure | |
344 | * | |
345 | * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. | |
346 | * For ASF and Pass Through versions of f/w this means that the | |
347 | * driver is no longer loaded. For AMT version (only with 82573) i | |
348 | * of the f/w this means that the network i/f is closed. | |
349 | * | |
350 | **/ | |
351 | ||
352 | static void e1000_release_hw_control(struct e1000_adapter *adapter) | |
353 | { | |
354 | u32 ctrl_ext; | |
355 | u32 swsm; | |
356 | struct e1000_hw *hw = &adapter->hw; | |
357 | ||
358 | /* Let firmware taken over control of h/w */ | |
359 | switch (hw->mac_type) { | |
360 | case e1000_82573: | |
361 | swsm = er32(SWSM); | |
362 | ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); | |
363 | break; | |
364 | case e1000_82571: | |
365 | case e1000_82572: | |
366 | case e1000_80003es2lan: | |
367 | case e1000_ich8lan: | |
368 | ctrl_ext = er32(CTRL_EXT); | |
369 | ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); | |
370 | break; | |
371 | default: | |
372 | break; | |
373 | } | |
374 | } | |
375 | ||
376 | /** | |
377 | * e1000_get_hw_control - get control of the h/w from f/w | |
378 | * @adapter: address of board private structure | |
379 | * | |
380 | * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. | |
381 | * For ASF and Pass Through versions of f/w this means that | |
382 | * the driver is loaded. For AMT version (only with 82573) | |
383 | * of the f/w this means that the network i/f is open. | |
384 | * | |
385 | **/ | |
386 | ||
387 | static void e1000_get_hw_control(struct e1000_adapter *adapter) | |
388 | { | |
389 | u32 ctrl_ext; | |
390 | u32 swsm; | |
391 | struct e1000_hw *hw = &adapter->hw; | |
392 | ||
393 | /* Let firmware know the driver has taken over */ | |
394 | switch (hw->mac_type) { | |
395 | case e1000_82573: | |
396 | swsm = er32(SWSM); | |
397 | ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); | |
398 | break; | |
399 | case e1000_82571: | |
400 | case e1000_82572: | |
401 | case e1000_80003es2lan: | |
402 | case e1000_ich8lan: | |
403 | ctrl_ext = er32(CTRL_EXT); | |
404 | ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); | |
405 | break; | |
406 | default: | |
407 | break; | |
408 | } | |
409 | } | |
410 | ||
411 | static void e1000_init_manageability(struct e1000_adapter *adapter) | |
412 | { | |
413 | struct e1000_hw *hw = &adapter->hw; | |
414 | ||
415 | if (adapter->en_mng_pt) { | |
416 | u32 manc = er32(MANC); | |
417 | ||
418 | /* disable hardware interception of ARP */ | |
419 | manc &= ~(E1000_MANC_ARP_EN); | |
420 | ||
421 | /* enable receiving management packets to the host */ | |
422 | /* this will probably generate destination unreachable messages | |
423 | * from the host OS, but the packets will be handled on SMBUS */ | |
424 | if (hw->has_manc2h) { | |
425 | u32 manc2h = er32(MANC2H); | |
426 | ||
427 | manc |= E1000_MANC_EN_MNG2HOST; | |
428 | #define E1000_MNG2HOST_PORT_623 (1 << 5) | |
429 | #define E1000_MNG2HOST_PORT_664 (1 << 6) | |
430 | manc2h |= E1000_MNG2HOST_PORT_623; | |
431 | manc2h |= E1000_MNG2HOST_PORT_664; | |
432 | ew32(MANC2H, manc2h); | |
433 | } | |
434 | ||
435 | ew32(MANC, manc); | |
436 | } | |
437 | } | |
438 | ||
439 | static void e1000_release_manageability(struct e1000_adapter *adapter) | |
440 | { | |
441 | struct e1000_hw *hw = &adapter->hw; | |
442 | ||
443 | if (adapter->en_mng_pt) { | |
444 | u32 manc = er32(MANC); | |
445 | ||
446 | /* re-enable hardware interception of ARP */ | |
447 | manc |= E1000_MANC_ARP_EN; | |
448 | ||
449 | if (hw->has_manc2h) | |
450 | manc &= ~E1000_MANC_EN_MNG2HOST; | |
451 | ||
452 | /* don't explicitly have to mess with MANC2H since | |
453 | * MANC has an enable disable that gates MANC2H */ | |
454 | ||
455 | ew32(MANC, manc); | |
456 | } | |
457 | } | |
458 | ||
459 | /** | |
460 | * e1000_configure - configure the hardware for RX and TX | |
461 | * @adapter = private board structure | |
462 | **/ | |
463 | static void e1000_configure(struct e1000_adapter *adapter) | |
464 | { | |
465 | struct net_device *netdev = adapter->netdev; | |
466 | int i; | |
467 | ||
468 | e1000_set_rx_mode(netdev); | |
469 | ||
470 | e1000_restore_vlan(adapter); | |
471 | e1000_init_manageability(adapter); | |
472 | ||
473 | e1000_configure_tx(adapter); | |
474 | e1000_setup_rctl(adapter); | |
475 | e1000_configure_rx(adapter); | |
476 | /* call E1000_DESC_UNUSED which always leaves | |
477 | * at least 1 descriptor unused to make sure | |
478 | * next_to_use != next_to_clean */ | |
479 | for (i = 0; i < adapter->num_rx_queues; i++) { | |
480 | struct e1000_rx_ring *ring = &adapter->rx_ring[i]; | |
481 | adapter->alloc_rx_buf(adapter, ring, | |
482 | E1000_DESC_UNUSED(ring)); | |
483 | } | |
484 | ||
485 | adapter->tx_queue_len = netdev->tx_queue_len; | |
486 | } | |
487 | ||
488 | int e1000_up(struct e1000_adapter *adapter) | |
489 | { | |
490 | struct e1000_hw *hw = &adapter->hw; | |
491 | ||
492 | /* hardware has been reset, we need to reload some things */ | |
493 | e1000_configure(adapter); | |
494 | ||
495 | clear_bit(__E1000_DOWN, &adapter->flags); | |
496 | ||
497 | napi_enable(&adapter->napi); | |
498 | ||
499 | e1000_irq_enable(adapter); | |
500 | ||
501 | /* fire a link change interrupt to start the watchdog */ | |
502 | ew32(ICS, E1000_ICS_LSC); | |
503 | return 0; | |
504 | } | |
505 | ||
506 | /** | |
507 | * e1000_power_up_phy - restore link in case the phy was powered down | |
508 | * @adapter: address of board private structure | |
509 | * | |
510 | * The phy may be powered down to save power and turn off link when the | |
511 | * driver is unloaded and wake on lan is not enabled (among others) | |
512 | * *** this routine MUST be followed by a call to e1000_reset *** | |
513 | * | |
514 | **/ | |
515 | ||
516 | void e1000_power_up_phy(struct e1000_adapter *adapter) | |
517 | { | |
518 | struct e1000_hw *hw = &adapter->hw; | |
519 | u16 mii_reg = 0; | |
520 | ||
521 | /* Just clear the power down bit to wake the phy back up */ | |
522 | if (hw->media_type == e1000_media_type_copper) { | |
523 | /* according to the manual, the phy will retain its | |
524 | * settings across a power-down/up cycle */ | |
525 | e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); | |
526 | mii_reg &= ~MII_CR_POWER_DOWN; | |
527 | e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); | |
528 | } | |
529 | } | |
530 | ||
531 | static void e1000_power_down_phy(struct e1000_adapter *adapter) | |
532 | { | |
533 | struct e1000_hw *hw = &adapter->hw; | |
534 | ||
535 | /* Power down the PHY so no link is implied when interface is down * | |
536 | * The PHY cannot be powered down if any of the following is true * | |
537 | * (a) WoL is enabled | |
538 | * (b) AMT is active | |
539 | * (c) SoL/IDER session is active */ | |
540 | if (!adapter->wol && hw->mac_type >= e1000_82540 && | |
541 | hw->media_type == e1000_media_type_copper) { | |
542 | u16 mii_reg = 0; | |
543 | ||
544 | switch (hw->mac_type) { | |
545 | case e1000_82540: | |
546 | case e1000_82545: | |
547 | case e1000_82545_rev_3: | |
548 | case e1000_82546: | |
549 | case e1000_82546_rev_3: | |
550 | case e1000_82541: | |
551 | case e1000_82541_rev_2: | |
552 | case e1000_82547: | |
553 | case e1000_82547_rev_2: | |
554 | if (er32(MANC) & E1000_MANC_SMBUS_EN) | |
555 | goto out; | |
556 | break; | |
557 | case e1000_82571: | |
558 | case e1000_82572: | |
559 | case e1000_82573: | |
560 | case e1000_80003es2lan: | |
561 | case e1000_ich8lan: | |
562 | if (e1000_check_mng_mode(hw) || | |
563 | e1000_check_phy_reset_block(hw)) | |
564 | goto out; | |
565 | break; | |
566 | default: | |
567 | goto out; | |
568 | } | |
569 | e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); | |
570 | mii_reg |= MII_CR_POWER_DOWN; | |
571 | e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); | |
572 | mdelay(1); | |
573 | } | |
574 | out: | |
575 | return; | |
576 | } | |
577 | ||
578 | void e1000_down(struct e1000_adapter *adapter) | |
579 | { | |
580 | struct net_device *netdev = adapter->netdev; | |
581 | ||
582 | /* signal that we're down so the interrupt handler does not | |
583 | * reschedule our watchdog timer */ | |
584 | set_bit(__E1000_DOWN, &adapter->flags); | |
585 | ||
586 | napi_disable(&adapter->napi); | |
587 | ||
588 | e1000_irq_disable(adapter); | |
589 | ||
590 | del_timer_sync(&adapter->tx_fifo_stall_timer); | |
591 | del_timer_sync(&adapter->watchdog_timer); | |
592 | del_timer_sync(&adapter->phy_info_timer); | |
593 | ||
594 | netdev->tx_queue_len = adapter->tx_queue_len; | |
595 | adapter->link_speed = 0; | |
596 | adapter->link_duplex = 0; | |
597 | netif_carrier_off(netdev); | |
598 | netif_stop_queue(netdev); | |
599 | ||
600 | e1000_reset(adapter); | |
601 | e1000_clean_all_tx_rings(adapter); | |
602 | e1000_clean_all_rx_rings(adapter); | |
603 | } | |
604 | ||
605 | void e1000_reinit_locked(struct e1000_adapter *adapter) | |
606 | { | |
607 | WARN_ON(in_interrupt()); | |
608 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) | |
609 | msleep(1); | |
610 | e1000_down(adapter); | |
611 | e1000_up(adapter); | |
612 | clear_bit(__E1000_RESETTING, &adapter->flags); | |
613 | } | |
614 | ||
615 | void e1000_reset(struct e1000_adapter *adapter) | |
616 | { | |
617 | struct e1000_hw *hw = &adapter->hw; | |
618 | u32 pba = 0, tx_space, min_tx_space, min_rx_space; | |
619 | u16 fc_high_water_mark = E1000_FC_HIGH_DIFF; | |
620 | bool legacy_pba_adjust = false; | |
621 | ||
622 | /* Repartition Pba for greater than 9k mtu | |
623 | * To take effect CTRL.RST is required. | |
624 | */ | |
625 | ||
626 | switch (hw->mac_type) { | |
627 | case e1000_82542_rev2_0: | |
628 | case e1000_82542_rev2_1: | |
629 | case e1000_82543: | |
630 | case e1000_82544: | |
631 | case e1000_82540: | |
632 | case e1000_82541: | |
633 | case e1000_82541_rev_2: | |
634 | legacy_pba_adjust = true; | |
635 | pba = E1000_PBA_48K; | |
636 | break; | |
637 | case e1000_82545: | |
638 | case e1000_82545_rev_3: | |
639 | case e1000_82546: | |
640 | case e1000_82546_rev_3: | |
641 | pba = E1000_PBA_48K; | |
642 | break; | |
643 | case e1000_82547: | |
644 | case e1000_82547_rev_2: | |
645 | legacy_pba_adjust = true; | |
646 | pba = E1000_PBA_30K; | |
647 | break; | |
648 | case e1000_82571: | |
649 | case e1000_82572: | |
650 | case e1000_80003es2lan: | |
651 | pba = E1000_PBA_38K; | |
652 | break; | |
653 | case e1000_82573: | |
654 | pba = E1000_PBA_20K; | |
655 | break; | |
656 | case e1000_ich8lan: | |
657 | pba = E1000_PBA_8K; | |
658 | case e1000_undefined: | |
659 | case e1000_num_macs: | |
660 | break; | |
661 | } | |
662 | ||
663 | if (legacy_pba_adjust) { | |
664 | if (adapter->netdev->mtu > E1000_RXBUFFER_8192) | |
665 | pba -= 8; /* allocate more FIFO for Tx */ | |
666 | ||
667 | if (hw->mac_type == e1000_82547) { | |
668 | adapter->tx_fifo_head = 0; | |
669 | adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; | |
670 | adapter->tx_fifo_size = | |
671 | (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; | |
672 | atomic_set(&adapter->tx_fifo_stall, 0); | |
673 | } | |
674 | } else if (hw->max_frame_size > MAXIMUM_ETHERNET_FRAME_SIZE) { | |
675 | /* adjust PBA for jumbo frames */ | |
676 | ew32(PBA, pba); | |
677 | ||
678 | /* To maintain wire speed transmits, the Tx FIFO should be | |
679 | * large enough to accomodate two full transmit packets, | |
680 | * rounded up to the next 1KB and expressed in KB. Likewise, | |
681 | * the Rx FIFO should be large enough to accomodate at least | |
682 | * one full receive packet and is similarly rounded up and | |
683 | * expressed in KB. */ | |
684 | pba = er32(PBA); | |
685 | /* upper 16 bits has Tx packet buffer allocation size in KB */ | |
686 | tx_space = pba >> 16; | |
687 | /* lower 16 bits has Rx packet buffer allocation size in KB */ | |
688 | pba &= 0xffff; | |
689 | /* don't include ethernet FCS because hardware appends/strips */ | |
690 | min_rx_space = adapter->netdev->mtu + ENET_HEADER_SIZE + | |
691 | VLAN_TAG_SIZE; | |
692 | min_tx_space = min_rx_space; | |
693 | min_tx_space *= 2; | |
694 | min_tx_space = ALIGN(min_tx_space, 1024); | |
695 | min_tx_space >>= 10; | |
696 | min_rx_space = ALIGN(min_rx_space, 1024); | |
697 | min_rx_space >>= 10; | |
698 | ||
699 | /* If current Tx allocation is less than the min Tx FIFO size, | |
700 | * and the min Tx FIFO size is less than the current Rx FIFO | |
701 | * allocation, take space away from current Rx allocation */ | |
702 | if (tx_space < min_tx_space && | |
703 | ((min_tx_space - tx_space) < pba)) { | |
704 | pba = pba - (min_tx_space - tx_space); | |
705 | ||
706 | /* PCI/PCIx hardware has PBA alignment constraints */ | |
707 | switch (hw->mac_type) { | |
708 | case e1000_82545 ... e1000_82546_rev_3: | |
709 | pba &= ~(E1000_PBA_8K - 1); | |
710 | break; | |
711 | default: | |
712 | break; | |
713 | } | |
714 | ||
715 | /* if short on rx space, rx wins and must trump tx | |
716 | * adjustment or use Early Receive if available */ | |
717 | if (pba < min_rx_space) { | |
718 | switch (hw->mac_type) { | |
719 | case e1000_82573: | |
720 | /* ERT enabled in e1000_configure_rx */ | |
721 | break; | |
722 | default: | |
723 | pba = min_rx_space; | |
724 | break; | |
725 | } | |
726 | } | |
727 | } | |
728 | } | |
729 | ||
730 | ew32(PBA, pba); | |
731 | ||
732 | /* flow control settings */ | |
733 | /* Set the FC high water mark to 90% of the FIFO size. | |
734 | * Required to clear last 3 LSB */ | |
735 | fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8; | |
736 | /* We can't use 90% on small FIFOs because the remainder | |
737 | * would be less than 1 full frame. In this case, we size | |
738 | * it to allow at least a full frame above the high water | |
739 | * mark. */ | |
740 | if (pba < E1000_PBA_16K) | |
741 | fc_high_water_mark = (pba * 1024) - 1600; | |
742 | ||
743 | hw->fc_high_water = fc_high_water_mark; | |
744 | hw->fc_low_water = fc_high_water_mark - 8; | |
745 | if (hw->mac_type == e1000_80003es2lan) | |
746 | hw->fc_pause_time = 0xFFFF; | |
747 | else | |
748 | hw->fc_pause_time = E1000_FC_PAUSE_TIME; | |
749 | hw->fc_send_xon = 1; | |
750 | hw->fc = hw->original_fc; | |
751 | ||
752 | /* Allow time for pending master requests to run */ | |
753 | e1000_reset_hw(hw); | |
754 | if (hw->mac_type >= e1000_82544) | |
755 | ew32(WUC, 0); | |
756 | ||
757 | if (e1000_init_hw(hw)) | |
758 | DPRINTK(PROBE, ERR, "Hardware Error\n"); | |
759 | e1000_update_mng_vlan(adapter); | |
760 | ||
761 | /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ | |
762 | if (hw->mac_type >= e1000_82544 && | |
763 | hw->mac_type <= e1000_82547_rev_2 && | |
764 | hw->autoneg == 1 && | |
765 | hw->autoneg_advertised == ADVERTISE_1000_FULL) { | |
766 | u32 ctrl = er32(CTRL); | |
767 | /* clear phy power management bit if we are in gig only mode, | |
768 | * which if enabled will attempt negotiation to 100Mb, which | |
769 | * can cause a loss of link at power off or driver unload */ | |
770 | ctrl &= ~E1000_CTRL_SWDPIN3; | |
771 | ew32(CTRL, ctrl); | |
772 | } | |
773 | ||
774 | /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ | |
775 | ew32(VET, ETHERNET_IEEE_VLAN_TYPE); | |
776 | ||
777 | e1000_reset_adaptive(hw); | |
778 | e1000_phy_get_info(hw, &adapter->phy_info); | |
779 | ||
780 | if (!adapter->smart_power_down && | |
781 | (hw->mac_type == e1000_82571 || | |
782 | hw->mac_type == e1000_82572)) { | |
783 | u16 phy_data = 0; | |
784 | /* speed up time to link by disabling smart power down, ignore | |
785 | * the return value of this function because there is nothing | |
786 | * different we would do if it failed */ | |
787 | e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, | |
788 | &phy_data); | |
789 | phy_data &= ~IGP02E1000_PM_SPD; | |
790 | e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, | |
791 | phy_data); | |
792 | } | |
793 | ||
794 | e1000_release_manageability(adapter); | |
795 | } | |
796 | ||
797 | /** | |
798 | * Dump the eeprom for users having checksum issues | |
799 | **/ | |
800 | static void e1000_dump_eeprom(struct e1000_adapter *adapter) | |
801 | { | |
802 | struct net_device *netdev = adapter->netdev; | |
803 | struct ethtool_eeprom eeprom; | |
804 | const struct ethtool_ops *ops = netdev->ethtool_ops; | |
805 | u8 *data; | |
806 | int i; | |
807 | u16 csum_old, csum_new = 0; | |
808 | ||
809 | eeprom.len = ops->get_eeprom_len(netdev); | |
810 | eeprom.offset = 0; | |
811 | ||
812 | data = kmalloc(eeprom.len, GFP_KERNEL); | |
813 | if (!data) { | |
814 | printk(KERN_ERR "Unable to allocate memory to dump EEPROM" | |
815 | " data\n"); | |
816 | return; | |
817 | } | |
818 | ||
819 | ops->get_eeprom(netdev, &eeprom, data); | |
820 | ||
821 | csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + | |
822 | (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); | |
823 | for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) | |
824 | csum_new += data[i] + (data[i + 1] << 8); | |
825 | csum_new = EEPROM_SUM - csum_new; | |
826 | ||
827 | printk(KERN_ERR "/*********************/\n"); | |
828 | printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old); | |
829 | printk(KERN_ERR "Calculated : 0x%04x\n", csum_new); | |
830 | ||
831 | printk(KERN_ERR "Offset Values\n"); | |
832 | printk(KERN_ERR "======== ======\n"); | |
833 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); | |
834 | ||
835 | printk(KERN_ERR "Include this output when contacting your support " | |
836 | "provider.\n"); | |
837 | printk(KERN_ERR "This is not a software error! Something bad " | |
838 | "happened to your hardware or\n"); | |
839 | printk(KERN_ERR "EEPROM image. Ignoring this " | |
840 | "problem could result in further problems,\n"); | |
841 | printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n"); | |
842 | printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, " | |
843 | "which is invalid\n"); | |
844 | printk(KERN_ERR "and requires you to set the proper MAC " | |
845 | "address manually before continuing\n"); | |
846 | printk(KERN_ERR "to enable this network device.\n"); | |
847 | printk(KERN_ERR "Please inspect the EEPROM dump and report the issue " | |
848 | "to your hardware vendor\n"); | |
849 | printk(KERN_ERR "or Intel Customer Support.\n"); | |
850 | printk(KERN_ERR "/*********************/\n"); | |
851 | ||
852 | kfree(data); | |
853 | } | |
854 | ||
855 | /** | |
856 | * e1000_is_need_ioport - determine if an adapter needs ioport resources or not | |
857 | * @pdev: PCI device information struct | |
858 | * | |
859 | * Return true if an adapter needs ioport resources | |
860 | **/ | |
861 | static int e1000_is_need_ioport(struct pci_dev *pdev) | |
862 | { | |
863 | switch (pdev->device) { | |
864 | case E1000_DEV_ID_82540EM: | |
865 | case E1000_DEV_ID_82540EM_LOM: | |
866 | case E1000_DEV_ID_82540EP: | |
867 | case E1000_DEV_ID_82540EP_LOM: | |
868 | case E1000_DEV_ID_82540EP_LP: | |
869 | case E1000_DEV_ID_82541EI: | |
870 | case E1000_DEV_ID_82541EI_MOBILE: | |
871 | case E1000_DEV_ID_82541ER: | |
872 | case E1000_DEV_ID_82541ER_LOM: | |
873 | case E1000_DEV_ID_82541GI: | |
874 | case E1000_DEV_ID_82541GI_LF: | |
875 | case E1000_DEV_ID_82541GI_MOBILE: | |
876 | case E1000_DEV_ID_82544EI_COPPER: | |
877 | case E1000_DEV_ID_82544EI_FIBER: | |
878 | case E1000_DEV_ID_82544GC_COPPER: | |
879 | case E1000_DEV_ID_82544GC_LOM: | |
880 | case E1000_DEV_ID_82545EM_COPPER: | |
881 | case E1000_DEV_ID_82545EM_FIBER: | |
882 | case E1000_DEV_ID_82546EB_COPPER: | |
883 | case E1000_DEV_ID_82546EB_FIBER: | |
884 | case E1000_DEV_ID_82546EB_QUAD_COPPER: | |
885 | return true; | |
886 | default: | |
887 | return false; | |
888 | } | |
889 | } | |
890 | ||
891 | static const struct net_device_ops e1000_netdev_ops = { | |
892 | .ndo_open = e1000_open, | |
893 | .ndo_stop = e1000_close, | |
894 | .ndo_start_xmit = e1000_xmit_frame, | |
895 | .ndo_get_stats = e1000_get_stats, | |
896 | .ndo_set_rx_mode = e1000_set_rx_mode, | |
897 | .ndo_set_mac_address = e1000_set_mac, | |
898 | .ndo_tx_timeout = e1000_tx_timeout, | |
899 | .ndo_change_mtu = e1000_change_mtu, | |
900 | .ndo_do_ioctl = e1000_ioctl, | |
901 | .ndo_validate_addr = eth_validate_addr, | |
902 | ||
903 | .ndo_vlan_rx_register = e1000_vlan_rx_register, | |
904 | .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, | |
905 | .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, | |
906 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
907 | .ndo_poll_controller = e1000_netpoll, | |
908 | #endif | |
909 | }; | |
910 | ||
911 | /** | |
912 | * e1000_probe - Device Initialization Routine | |
913 | * @pdev: PCI device information struct | |
914 | * @ent: entry in e1000_pci_tbl | |
915 | * | |
916 | * Returns 0 on success, negative on failure | |
917 | * | |
918 | * e1000_probe initializes an adapter identified by a pci_dev structure. | |
919 | * The OS initialization, configuring of the adapter private structure, | |
920 | * and a hardware reset occur. | |
921 | **/ | |
922 | static int __devinit e1000_probe(struct pci_dev *pdev, | |
923 | const struct pci_device_id *ent) | |
924 | { | |
925 | struct net_device *netdev; | |
926 | struct e1000_adapter *adapter; | |
927 | struct e1000_hw *hw; | |
928 | ||
929 | static int cards_found = 0; | |
930 | static int global_quad_port_a = 0; /* global ksp3 port a indication */ | |
931 | int i, err, pci_using_dac; | |
932 | u16 eeprom_data = 0; | |
933 | u16 eeprom_apme_mask = E1000_EEPROM_APME; | |
934 | int bars, need_ioport; | |
935 | ||
936 | /* do not allocate ioport bars when not needed */ | |
937 | need_ioport = e1000_is_need_ioport(pdev); | |
938 | if (need_ioport) { | |
939 | bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); | |
940 | err = pci_enable_device(pdev); | |
941 | } else { | |
942 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | |
943 | err = pci_enable_device_mem(pdev); | |
944 | } | |
945 | if (err) | |
946 | return err; | |
947 | ||
948 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && | |
949 | !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) { | |
950 | pci_using_dac = 1; | |
951 | } else { | |
952 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | |
953 | if (err) { | |
954 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | |
955 | if (err) { | |
956 | E1000_ERR("No usable DMA configuration, " | |
957 | "aborting\n"); | |
958 | goto err_dma; | |
959 | } | |
960 | } | |
961 | pci_using_dac = 0; | |
962 | } | |
963 | ||
964 | err = pci_request_selected_regions(pdev, bars, e1000_driver_name); | |
965 | if (err) | |
966 | goto err_pci_reg; | |
967 | ||
968 | pci_set_master(pdev); | |
969 | ||
970 | err = -ENOMEM; | |
971 | netdev = alloc_etherdev(sizeof(struct e1000_adapter)); | |
972 | if (!netdev) | |
973 | goto err_alloc_etherdev; | |
974 | ||
975 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
976 | ||
977 | pci_set_drvdata(pdev, netdev); | |
978 | adapter = netdev_priv(netdev); | |
979 | adapter->netdev = netdev; | |
980 | adapter->pdev = pdev; | |
981 | adapter->msg_enable = (1 << debug) - 1; | |
982 | adapter->bars = bars; | |
983 | adapter->need_ioport = need_ioport; | |
984 | ||
985 | hw = &adapter->hw; | |
986 | hw->back = adapter; | |
987 | ||
988 | err = -EIO; | |
989 | hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); | |
990 | if (!hw->hw_addr) | |
991 | goto err_ioremap; | |
992 | ||
993 | if (adapter->need_ioport) { | |
994 | for (i = BAR_1; i <= BAR_5; i++) { | |
995 | if (pci_resource_len(pdev, i) == 0) | |
996 | continue; | |
997 | if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { | |
998 | hw->io_base = pci_resource_start(pdev, i); | |
999 | break; | |
1000 | } | |
1001 | } | |
1002 | } | |
1003 | ||
1004 | netdev->netdev_ops = &e1000_netdev_ops; | |
1005 | e1000_set_ethtool_ops(netdev); | |
1006 | netdev->watchdog_timeo = 5 * HZ; | |
1007 | netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); | |
1008 | ||
1009 | strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); | |
1010 | ||
1011 | adapter->bd_number = cards_found; | |
1012 | ||
1013 | /* setup the private structure */ | |
1014 | ||
1015 | err = e1000_sw_init(adapter); | |
1016 | if (err) | |
1017 | goto err_sw_init; | |
1018 | ||
1019 | err = -EIO; | |
1020 | /* Flash BAR mapping must happen after e1000_sw_init | |
1021 | * because it depends on mac_type */ | |
1022 | if ((hw->mac_type == e1000_ich8lan) && | |
1023 | (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | |
1024 | hw->flash_address = pci_ioremap_bar(pdev, 1); | |
1025 | if (!hw->flash_address) | |
1026 | goto err_flashmap; | |
1027 | } | |
1028 | ||
1029 | if (e1000_check_phy_reset_block(hw)) | |
1030 | DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); | |
1031 | ||
1032 | if (hw->mac_type >= e1000_82543) { | |
1033 | netdev->features = NETIF_F_SG | | |
1034 | NETIF_F_HW_CSUM | | |
1035 | NETIF_F_HW_VLAN_TX | | |
1036 | NETIF_F_HW_VLAN_RX | | |
1037 | NETIF_F_HW_VLAN_FILTER; | |
1038 | if (hw->mac_type == e1000_ich8lan) | |
1039 | netdev->features &= ~NETIF_F_HW_VLAN_FILTER; | |
1040 | } | |
1041 | ||
1042 | if ((hw->mac_type >= e1000_82544) && | |
1043 | (hw->mac_type != e1000_82547)) | |
1044 | netdev->features |= NETIF_F_TSO; | |
1045 | ||
1046 | if (hw->mac_type > e1000_82547_rev_2) | |
1047 | netdev->features |= NETIF_F_TSO6; | |
1048 | if (pci_using_dac) | |
1049 | netdev->features |= NETIF_F_HIGHDMA; | |
1050 | ||
1051 | netdev->vlan_features |= NETIF_F_TSO; | |
1052 | netdev->vlan_features |= NETIF_F_TSO6; | |
1053 | netdev->vlan_features |= NETIF_F_HW_CSUM; | |
1054 | netdev->vlan_features |= NETIF_F_SG; | |
1055 | ||
1056 | adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); | |
1057 | ||
1058 | /* initialize eeprom parameters */ | |
1059 | if (e1000_init_eeprom_params(hw)) { | |
1060 | E1000_ERR("EEPROM initialization failed\n"); | |
1061 | goto err_eeprom; | |
1062 | } | |
1063 | ||
1064 | /* before reading the EEPROM, reset the controller to | |
1065 | * put the device in a known good starting state */ | |
1066 | ||
1067 | e1000_reset_hw(hw); | |
1068 | ||
1069 | /* make sure the EEPROM is good */ | |
1070 | if (e1000_validate_eeprom_checksum(hw) < 0) { | |
1071 | DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); | |
1072 | e1000_dump_eeprom(adapter); | |
1073 | /* | |
1074 | * set MAC address to all zeroes to invalidate and temporary | |
1075 | * disable this device for the user. This blocks regular | |
1076 | * traffic while still permitting ethtool ioctls from reaching | |
1077 | * the hardware as well as allowing the user to run the | |
1078 | * interface after manually setting a hw addr using | |
1079 | * `ip set address` | |
1080 | */ | |
1081 | memset(hw->mac_addr, 0, netdev->addr_len); | |
1082 | } else { | |
1083 | /* copy the MAC address out of the EEPROM */ | |
1084 | if (e1000_read_mac_addr(hw)) | |
1085 | DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); | |
1086 | } | |
1087 | /* don't block initalization here due to bad MAC address */ | |
1088 | memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); | |
1089 | memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); | |
1090 | ||
1091 | if (!is_valid_ether_addr(netdev->perm_addr)) | |
1092 | DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); | |
1093 | ||
1094 | e1000_get_bus_info(hw); | |
1095 | ||
1096 | init_timer(&adapter->tx_fifo_stall_timer); | |
1097 | adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall; | |
1098 | adapter->tx_fifo_stall_timer.data = (unsigned long)adapter; | |
1099 | ||
1100 | init_timer(&adapter->watchdog_timer); | |
1101 | adapter->watchdog_timer.function = &e1000_watchdog; | |
1102 | adapter->watchdog_timer.data = (unsigned long) adapter; | |
1103 | ||
1104 | init_timer(&adapter->phy_info_timer); | |
1105 | adapter->phy_info_timer.function = &e1000_update_phy_info; | |
1106 | adapter->phy_info_timer.data = (unsigned long)adapter; | |
1107 | ||
1108 | INIT_WORK(&adapter->reset_task, e1000_reset_task); | |
1109 | ||
1110 | e1000_check_options(adapter); | |
1111 | ||
1112 | /* Initial Wake on LAN setting | |
1113 | * If APM wake is enabled in the EEPROM, | |
1114 | * enable the ACPI Magic Packet filter | |
1115 | */ | |
1116 | ||
1117 | switch (hw->mac_type) { | |
1118 | case e1000_82542_rev2_0: | |
1119 | case e1000_82542_rev2_1: | |
1120 | case e1000_82543: | |
1121 | break; | |
1122 | case e1000_82544: | |
1123 | e1000_read_eeprom(hw, | |
1124 | EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); | |
1125 | eeprom_apme_mask = E1000_EEPROM_82544_APM; | |
1126 | break; | |
1127 | case e1000_ich8lan: | |
1128 | e1000_read_eeprom(hw, | |
1129 | EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data); | |
1130 | eeprom_apme_mask = E1000_EEPROM_ICH8_APME; | |
1131 | break; | |
1132 | case e1000_82546: | |
1133 | case e1000_82546_rev_3: | |
1134 | case e1000_82571: | |
1135 | case e1000_80003es2lan: | |
1136 | if (er32(STATUS) & E1000_STATUS_FUNC_1){ | |
1137 | e1000_read_eeprom(hw, | |
1138 | EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); | |
1139 | break; | |
1140 | } | |
1141 | /* Fall Through */ | |
1142 | default: | |
1143 | e1000_read_eeprom(hw, | |
1144 | EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); | |
1145 | break; | |
1146 | } | |
1147 | if (eeprom_data & eeprom_apme_mask) | |
1148 | adapter->eeprom_wol |= E1000_WUFC_MAG; | |
1149 | ||
1150 | /* now that we have the eeprom settings, apply the special cases | |
1151 | * where the eeprom may be wrong or the board simply won't support | |
1152 | * wake on lan on a particular port */ | |
1153 | switch (pdev->device) { | |
1154 | case E1000_DEV_ID_82546GB_PCIE: | |
1155 | adapter->eeprom_wol = 0; | |
1156 | break; | |
1157 | case E1000_DEV_ID_82546EB_FIBER: | |
1158 | case E1000_DEV_ID_82546GB_FIBER: | |
1159 | case E1000_DEV_ID_82571EB_FIBER: | |
1160 | /* Wake events only supported on port A for dual fiber | |
1161 | * regardless of eeprom setting */ | |
1162 | if (er32(STATUS) & E1000_STATUS_FUNC_1) | |
1163 | adapter->eeprom_wol = 0; | |
1164 | break; | |
1165 | case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: | |
1166 | case E1000_DEV_ID_82571EB_QUAD_COPPER: | |
1167 | case E1000_DEV_ID_82571EB_QUAD_FIBER: | |
1168 | case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: | |
1169 | case E1000_DEV_ID_82571PT_QUAD_COPPER: | |
1170 | /* if quad port adapter, disable WoL on all but port A */ | |
1171 | if (global_quad_port_a != 0) | |
1172 | adapter->eeprom_wol = 0; | |
1173 | else | |
1174 | adapter->quad_port_a = 1; | |
1175 | /* Reset for multiple quad port adapters */ | |
1176 | if (++global_quad_port_a == 4) | |
1177 | global_quad_port_a = 0; | |
1178 | break; | |
1179 | } | |
1180 | ||
1181 | /* initialize the wol settings based on the eeprom settings */ | |
1182 | adapter->wol = adapter->eeprom_wol; | |
1183 | device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); | |
1184 | ||
1185 | /* print bus type/speed/width info */ | |
1186 | DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", | |
1187 | ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : | |
1188 | (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")), | |
1189 | ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" : | |
1190 | (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" : | |
1191 | (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" : | |
1192 | (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" : | |
1193 | (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"), | |
1194 | ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : | |
1195 | (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" : | |
1196 | (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" : | |
1197 | "32-bit")); | |
1198 | ||
1199 | printk("%pM\n", netdev->dev_addr); | |
1200 | ||
1201 | if (hw->bus_type == e1000_bus_type_pci_express) { | |
1202 | DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no " | |
1203 | "longer be supported by this driver in the future.\n", | |
1204 | pdev->vendor, pdev->device); | |
1205 | DPRINTK(PROBE, WARNING, "please use the \"e1000e\" " | |
1206 | "driver instead.\n"); | |
1207 | } | |
1208 | ||
1209 | /* reset the hardware with the new settings */ | |
1210 | e1000_reset(adapter); | |
1211 | ||
1212 | /* If the controller is 82573 and f/w is AMT, do not set | |
1213 | * DRV_LOAD until the interface is up. For all other cases, | |
1214 | * let the f/w know that the h/w is now under the control | |
1215 | * of the driver. */ | |
1216 | if (hw->mac_type != e1000_82573 || | |
1217 | !e1000_check_mng_mode(hw)) | |
1218 | e1000_get_hw_control(adapter); | |
1219 | ||
1220 | /* tell the stack to leave us alone until e1000_open() is called */ | |
1221 | netif_carrier_off(netdev); | |
1222 | netif_stop_queue(netdev); | |
1223 | ||
1224 | strcpy(netdev->name, "eth%d"); | |
1225 | err = register_netdev(netdev); | |
1226 | if (err) | |
1227 | goto err_register; | |
1228 | ||
1229 | DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); | |
1230 | ||
1231 | cards_found++; | |
1232 | return 0; | |
1233 | ||
1234 | err_register: | |
1235 | e1000_release_hw_control(adapter); | |
1236 | err_eeprom: | |
1237 | if (!e1000_check_phy_reset_block(hw)) | |
1238 | e1000_phy_hw_reset(hw); | |
1239 | ||
1240 | if (hw->flash_address) | |
1241 | iounmap(hw->flash_address); | |
1242 | err_flashmap: | |
1243 | kfree(adapter->tx_ring); | |
1244 | kfree(adapter->rx_ring); | |
1245 | err_sw_init: | |
1246 | iounmap(hw->hw_addr); | |
1247 | err_ioremap: | |
1248 | free_netdev(netdev); | |
1249 | err_alloc_etherdev: | |
1250 | pci_release_selected_regions(pdev, bars); | |
1251 | err_pci_reg: | |
1252 | err_dma: | |
1253 | pci_disable_device(pdev); | |
1254 | return err; | |
1255 | } | |
1256 | ||
1257 | /** | |
1258 | * e1000_remove - Device Removal Routine | |
1259 | * @pdev: PCI device information struct | |
1260 | * | |
1261 | * e1000_remove is called by the PCI subsystem to alert the driver | |
1262 | * that it should release a PCI device. The could be caused by a | |
1263 | * Hot-Plug event, or because the driver is going to be removed from | |
1264 | * memory. | |
1265 | **/ | |
1266 | ||
1267 | static void __devexit e1000_remove(struct pci_dev *pdev) | |
1268 | { | |
1269 | struct net_device *netdev = pci_get_drvdata(pdev); | |
1270 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
1271 | struct e1000_hw *hw = &adapter->hw; | |
1272 | ||
1273 | cancel_work_sync(&adapter->reset_task); | |
1274 | ||
1275 | e1000_release_manageability(adapter); | |
1276 | ||
1277 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | |
1278 | * would have already happened in close and is redundant. */ | |
1279 | e1000_release_hw_control(adapter); | |
1280 | ||
1281 | unregister_netdev(netdev); | |
1282 | ||
1283 | if (!e1000_check_phy_reset_block(hw)) | |
1284 | e1000_phy_hw_reset(hw); | |
1285 | ||
1286 | kfree(adapter->tx_ring); | |
1287 | kfree(adapter->rx_ring); | |
1288 | ||
1289 | iounmap(hw->hw_addr); | |
1290 | if (hw->flash_address) | |
1291 | iounmap(hw->flash_address); | |
1292 | pci_release_selected_regions(pdev, adapter->bars); | |
1293 | ||
1294 | free_netdev(netdev); | |
1295 | ||
1296 | pci_disable_device(pdev); | |
1297 | } | |
1298 | ||
1299 | /** | |
1300 | * e1000_sw_init - Initialize general software structures (struct e1000_adapter) | |
1301 | * @adapter: board private structure to initialize | |
1302 | * | |
1303 | * e1000_sw_init initializes the Adapter private data structure. | |
1304 | * Fields are initialized based on PCI device information and | |
1305 | * OS network device settings (MTU size). | |
1306 | **/ | |
1307 | ||
1308 | static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | |
1309 | { | |
1310 | struct e1000_hw *hw = &adapter->hw; | |
1311 | struct net_device *netdev = adapter->netdev; | |
1312 | struct pci_dev *pdev = adapter->pdev; | |
1313 | ||
1314 | /* PCI config space info */ | |
1315 | ||
1316 | hw->vendor_id = pdev->vendor; | |
1317 | hw->device_id = pdev->device; | |
1318 | hw->subsystem_vendor_id = pdev->subsystem_vendor; | |
1319 | hw->subsystem_id = pdev->subsystem_device; | |
1320 | hw->revision_id = pdev->revision; | |
1321 | ||
1322 | pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); | |
1323 | ||
1324 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; | |
1325 | hw->max_frame_size = netdev->mtu + | |
1326 | ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | |
1327 | hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; | |
1328 | ||
1329 | /* identify the MAC */ | |
1330 | ||
1331 | if (e1000_set_mac_type(hw)) { | |
1332 | DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); | |
1333 | return -EIO; | |
1334 | } | |
1335 | ||
1336 | switch (hw->mac_type) { | |
1337 | default: | |
1338 | break; | |
1339 | case e1000_82541: | |
1340 | case e1000_82547: | |
1341 | case e1000_82541_rev_2: | |
1342 | case e1000_82547_rev_2: | |
1343 | hw->phy_init_script = 1; | |
1344 | break; | |
1345 | } | |
1346 | ||
1347 | e1000_set_media_type(hw); | |
1348 | ||
1349 | hw->wait_autoneg_complete = false; | |
1350 | hw->tbi_compatibility_en = true; | |
1351 | hw->adaptive_ifs = true; | |
1352 | ||
1353 | /* Copper options */ | |
1354 | ||
1355 | if (hw->media_type == e1000_media_type_copper) { | |
1356 | hw->mdix = AUTO_ALL_MODES; | |
1357 | hw->disable_polarity_correction = false; | |
1358 | hw->master_slave = E1000_MASTER_SLAVE; | |
1359 | } | |
1360 | ||
1361 | adapter->num_tx_queues = 1; | |
1362 | adapter->num_rx_queues = 1; | |
1363 | ||
1364 | if (e1000_alloc_queues(adapter)) { | |
1365 | DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); | |
1366 | return -ENOMEM; | |
1367 | } | |
1368 | ||
1369 | /* Explicitly disable IRQ since the NIC can be in any state. */ | |
1370 | e1000_irq_disable(adapter); | |
1371 | ||
1372 | spin_lock_init(&adapter->stats_lock); | |
1373 | ||
1374 | set_bit(__E1000_DOWN, &adapter->flags); | |
1375 | ||
1376 | return 0; | |
1377 | } | |
1378 | ||
1379 | /** | |
1380 | * e1000_alloc_queues - Allocate memory for all rings | |
1381 | * @adapter: board private structure to initialize | |
1382 | * | |
1383 | * We allocate one ring per queue at run-time since we don't know the | |
1384 | * number of queues at compile-time. | |
1385 | **/ | |
1386 | ||
1387 | static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) | |
1388 | { | |
1389 | adapter->tx_ring = kcalloc(adapter->num_tx_queues, | |
1390 | sizeof(struct e1000_tx_ring), GFP_KERNEL); | |
1391 | if (!adapter->tx_ring) | |
1392 | return -ENOMEM; | |
1393 | ||
1394 | adapter->rx_ring = kcalloc(adapter->num_rx_queues, | |
1395 | sizeof(struct e1000_rx_ring), GFP_KERNEL); | |
1396 | if (!adapter->rx_ring) { | |
1397 | kfree(adapter->tx_ring); | |
1398 | return -ENOMEM; | |
1399 | } | |
1400 | ||
1401 | return E1000_SUCCESS; | |
1402 | } | |
1403 | ||
1404 | /** | |
1405 | * e1000_open - Called when a network interface is made active | |
1406 | * @netdev: network interface device structure | |
1407 | * | |
1408 | * Returns 0 on success, negative value on failure | |
1409 | * | |
1410 | * The open entry point is called when a network interface is made | |
1411 | * active by the system (IFF_UP). At this point all resources needed | |
1412 | * for transmit and receive operations are allocated, the interrupt | |
1413 | * handler is registered with the OS, the watchdog timer is started, | |
1414 | * and the stack is notified that the interface is ready. | |
1415 | **/ | |
1416 | ||
1417 | static int e1000_open(struct net_device *netdev) | |
1418 | { | |
1419 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
1420 | struct e1000_hw *hw = &adapter->hw; | |
1421 | int err; | |
1422 | ||
1423 | /* disallow open during test */ | |
1424 | if (test_bit(__E1000_TESTING, &adapter->flags)) | |
1425 | return -EBUSY; | |
1426 | ||
1427 | /* allocate transmit descriptors */ | |
1428 | err = e1000_setup_all_tx_resources(adapter); | |
1429 | if (err) | |
1430 | goto err_setup_tx; | |
1431 | ||
1432 | /* allocate receive descriptors */ | |
1433 | err = e1000_setup_all_rx_resources(adapter); | |
1434 | if (err) | |
1435 | goto err_setup_rx; | |
1436 | ||
1437 | e1000_power_up_phy(adapter); | |
1438 | ||
1439 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | |
1440 | if ((hw->mng_cookie.status & | |
1441 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { | |
1442 | e1000_update_mng_vlan(adapter); | |
1443 | } | |
1444 | ||
1445 | /* If AMT is enabled, let the firmware know that the network | |
1446 | * interface is now open */ | |
1447 | if (hw->mac_type == e1000_82573 && | |
1448 | e1000_check_mng_mode(hw)) | |
1449 | e1000_get_hw_control(adapter); | |
1450 | ||
1451 | /* before we allocate an interrupt, we must be ready to handle it. | |
1452 | * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt | |
1453 | * as soon as we call pci_request_irq, so we have to setup our | |
1454 | * clean_rx handler before we do so. */ | |
1455 | e1000_configure(adapter); | |
1456 | ||
1457 | err = e1000_request_irq(adapter); | |
1458 | if (err) | |
1459 | goto err_req_irq; | |
1460 | ||
1461 | /* From here on the code is the same as e1000_up() */ | |
1462 | clear_bit(__E1000_DOWN, &adapter->flags); | |
1463 | ||
1464 | napi_enable(&adapter->napi); | |
1465 | ||
1466 | e1000_irq_enable(adapter); | |
1467 | ||
1468 | netif_start_queue(netdev); | |
1469 | ||
1470 | /* fire a link status change interrupt to start the watchdog */ | |
1471 | ew32(ICS, E1000_ICS_LSC); | |
1472 | ||
1473 | return E1000_SUCCESS; | |
1474 | ||
1475 | err_req_irq: | |
1476 | e1000_release_hw_control(adapter); | |
1477 | e1000_power_down_phy(adapter); | |
1478 | e1000_free_all_rx_resources(adapter); | |
1479 | err_setup_rx: | |
1480 | e1000_free_all_tx_resources(adapter); | |
1481 | err_setup_tx: | |
1482 | e1000_reset(adapter); | |
1483 | ||
1484 | return err; | |
1485 | } | |
1486 | ||
1487 | /** | |
1488 | * e1000_close - Disables a network interface | |
1489 | * @netdev: network interface device structure | |
1490 | * | |
1491 | * Returns 0, this is not allowed to fail | |
1492 | * | |
1493 | * The close entry point is called when an interface is de-activated | |
1494 | * by the OS. The hardware is still under the drivers control, but | |
1495 | * needs to be disabled. A global MAC reset is issued to stop the | |
1496 | * hardware, and all transmit and receive resources are freed. | |
1497 | **/ | |
1498 | ||
1499 | static int e1000_close(struct net_device *netdev) | |
1500 | { | |
1501 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
1502 | struct e1000_hw *hw = &adapter->hw; | |
1503 | ||
1504 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | |
1505 | e1000_down(adapter); | |
1506 | e1000_power_down_phy(adapter); | |
1507 | e1000_free_irq(adapter); | |
1508 | ||
1509 | e1000_free_all_tx_resources(adapter); | |
1510 | e1000_free_all_rx_resources(adapter); | |
1511 | ||
1512 | /* kill manageability vlan ID if supported, but not if a vlan with | |
1513 | * the same ID is registered on the host OS (let 8021q kill it) */ | |
1514 | if ((hw->mng_cookie.status & | |
1515 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | |
1516 | !(adapter->vlgrp && | |
1517 | vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) { | |
1518 | e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); | |
1519 | } | |
1520 | ||
1521 | /* If AMT is enabled, let the firmware know that the network | |
1522 | * interface is now closed */ | |
1523 | if (hw->mac_type == e1000_82573 && | |
1524 | e1000_check_mng_mode(hw)) | |
1525 | e1000_release_hw_control(adapter); | |
1526 | ||
1527 | return 0; | |
1528 | } | |
1529 | ||
1530 | /** | |
1531 | * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary | |
1532 | * @adapter: address of board private structure | |
1533 | * @start: address of beginning of memory | |
1534 | * @len: length of memory | |
1535 | **/ | |
1536 | static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, | |
1537 | unsigned long len) | |
1538 | { | |
1539 | struct e1000_hw *hw = &adapter->hw; | |
1540 | unsigned long begin = (unsigned long)start; | |
1541 | unsigned long end = begin + len; | |
1542 | ||
1543 | /* First rev 82545 and 82546 need to not allow any memory | |
1544 | * write location to cross 64k boundary due to errata 23 */ | |
1545 | if (hw->mac_type == e1000_82545 || | |
1546 | hw->mac_type == e1000_82546) { | |
1547 | return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; | |
1548 | } | |
1549 | ||
1550 | return true; | |
1551 | } | |
1552 | ||
1553 | /** | |
1554 | * e1000_setup_tx_resources - allocate Tx resources (Descriptors) | |
1555 | * @adapter: board private structure | |
1556 | * @txdr: tx descriptor ring (for a specific queue) to setup | |
1557 | * | |
1558 | * Return 0 on success, negative on failure | |
1559 | **/ | |
1560 | ||
1561 | static int e1000_setup_tx_resources(struct e1000_adapter *adapter, | |
1562 | struct e1000_tx_ring *txdr) | |
1563 | { | |
1564 | struct pci_dev *pdev = adapter->pdev; | |
1565 | int size; | |
1566 | ||
1567 | size = sizeof(struct e1000_buffer) * txdr->count; | |
1568 | txdr->buffer_info = vmalloc(size); | |
1569 | if (!txdr->buffer_info) { | |
1570 | DPRINTK(PROBE, ERR, | |
1571 | "Unable to allocate memory for the transmit descriptor ring\n"); | |
1572 | return -ENOMEM; | |
1573 | } | |
1574 | memset(txdr->buffer_info, 0, size); | |
1575 | ||
1576 | /* round up to nearest 4K */ | |
1577 | ||
1578 | txdr->size = txdr->count * sizeof(struct e1000_tx_desc); | |
1579 | txdr->size = ALIGN(txdr->size, 4096); | |
1580 | ||
1581 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); | |
1582 | if (!txdr->desc) { | |
1583 | setup_tx_desc_die: | |
1584 | vfree(txdr->buffer_info); | |
1585 | DPRINTK(PROBE, ERR, | |
1586 | "Unable to allocate memory for the transmit descriptor ring\n"); | |
1587 | return -ENOMEM; | |
1588 | } | |
1589 | ||
1590 | /* Fix for errata 23, can't cross 64kB boundary */ | |
1591 | if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { | |
1592 | void *olddesc = txdr->desc; | |
1593 | dma_addr_t olddma = txdr->dma; | |
1594 | DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes " | |
1595 | "at %p\n", txdr->size, txdr->desc); | |
1596 | /* Try again, without freeing the previous */ | |
1597 | txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); | |
1598 | /* Failed allocation, critical failure */ | |
1599 | if (!txdr->desc) { | |
1600 | pci_free_consistent(pdev, txdr->size, olddesc, olddma); | |
1601 | goto setup_tx_desc_die; | |
1602 | } | |
1603 | ||
1604 | if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { | |
1605 | /* give up */ | |
1606 | pci_free_consistent(pdev, txdr->size, txdr->desc, | |
1607 | txdr->dma); | |
1608 | pci_free_consistent(pdev, txdr->size, olddesc, olddma); | |
1609 | DPRINTK(PROBE, ERR, | |
1610 | "Unable to allocate aligned memory " | |
1611 | "for the transmit descriptor ring\n"); | |
1612 | vfree(txdr->buffer_info); | |
1613 | return -ENOMEM; | |
1614 | } else { | |
1615 | /* Free old allocation, new allocation was successful */ | |
1616 | pci_free_consistent(pdev, txdr->size, olddesc, olddma); | |
1617 | } | |
1618 | } | |
1619 | memset(txdr->desc, 0, txdr->size); | |
1620 | ||
1621 | txdr->next_to_use = 0; | |
1622 | txdr->next_to_clean = 0; | |
1623 | ||
1624 | return 0; | |
1625 | } | |
1626 | ||
1627 | /** | |
1628 | * e1000_setup_all_tx_resources - wrapper to allocate Tx resources | |
1629 | * (Descriptors) for all queues | |
1630 | * @adapter: board private structure | |
1631 | * | |
1632 | * Return 0 on success, negative on failure | |
1633 | **/ | |
1634 | ||
1635 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) | |
1636 | { | |
1637 | int i, err = 0; | |
1638 | ||
1639 | for (i = 0; i < adapter->num_tx_queues; i++) { | |
1640 | err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); | |
1641 | if (err) { | |
1642 | DPRINTK(PROBE, ERR, | |
1643 | "Allocation for Tx Queue %u failed\n", i); | |
1644 | for (i-- ; i >= 0; i--) | |
1645 | e1000_free_tx_resources(adapter, | |
1646 | &adapter->tx_ring[i]); | |
1647 | break; | |
1648 | } | |
1649 | } | |
1650 | ||
1651 | return err; | |
1652 | } | |
1653 | ||
1654 | /** | |
1655 | * e1000_configure_tx - Configure 8254x Transmit Unit after Reset | |
1656 | * @adapter: board private structure | |
1657 | * | |
1658 | * Configure the Tx unit of the MAC after a reset. | |
1659 | **/ | |
1660 | ||
1661 | static void e1000_configure_tx(struct e1000_adapter *adapter) | |
1662 | { | |
1663 | u64 tdba; | |
1664 | struct e1000_hw *hw = &adapter->hw; | |
1665 | u32 tdlen, tctl, tipg, tarc; | |
1666 | u32 ipgr1, ipgr2; | |
1667 | ||
1668 | /* Setup the HW Tx Head and Tail descriptor pointers */ | |
1669 | ||
1670 | switch (adapter->num_tx_queues) { | |
1671 | case 1: | |
1672 | default: | |
1673 | tdba = adapter->tx_ring[0].dma; | |
1674 | tdlen = adapter->tx_ring[0].count * | |
1675 | sizeof(struct e1000_tx_desc); | |
1676 | ew32(TDLEN, tdlen); | |
1677 | ew32(TDBAH, (tdba >> 32)); | |
1678 | ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); | |
1679 | ew32(TDT, 0); | |
1680 | ew32(TDH, 0); | |
1681 | adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH); | |
1682 | adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT); | |
1683 | break; | |
1684 | } | |
1685 | ||
1686 | /* Set the default values for the Tx Inter Packet Gap timer */ | |
1687 | if (hw->mac_type <= e1000_82547_rev_2 && | |
1688 | (hw->media_type == e1000_media_type_fiber || | |
1689 | hw->media_type == e1000_media_type_internal_serdes)) | |
1690 | tipg = DEFAULT_82543_TIPG_IPGT_FIBER; | |
1691 | else | |
1692 | tipg = DEFAULT_82543_TIPG_IPGT_COPPER; | |
1693 | ||
1694 | switch (hw->mac_type) { | |
1695 | case e1000_82542_rev2_0: | |
1696 | case e1000_82542_rev2_1: | |
1697 | tipg = DEFAULT_82542_TIPG_IPGT; | |
1698 | ipgr1 = DEFAULT_82542_TIPG_IPGR1; | |
1699 | ipgr2 = DEFAULT_82542_TIPG_IPGR2; | |
1700 | break; | |
1701 | case e1000_80003es2lan: | |
1702 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; | |
1703 | ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; | |
1704 | break; | |
1705 | default: | |
1706 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; | |
1707 | ipgr2 = DEFAULT_82543_TIPG_IPGR2; | |
1708 | break; | |
1709 | } | |
1710 | tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; | |
1711 | tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; | |
1712 | ew32(TIPG, tipg); | |
1713 | ||
1714 | /* Set the Tx Interrupt Delay register */ | |
1715 | ||
1716 | ew32(TIDV, adapter->tx_int_delay); | |
1717 | if (hw->mac_type >= e1000_82540) | |
1718 | ew32(TADV, adapter->tx_abs_int_delay); | |
1719 | ||
1720 | /* Program the Transmit Control Register */ | |
1721 | ||
1722 | tctl = er32(TCTL); | |
1723 | tctl &= ~E1000_TCTL_CT; | |
1724 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | | |
1725 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); | |
1726 | ||
1727 | if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { | |
1728 | tarc = er32(TARC0); | |
1729 | /* set the speed mode bit, we'll clear it if we're not at | |
1730 | * gigabit link later */ | |
1731 | tarc |= (1 << 21); | |
1732 | ew32(TARC0, tarc); | |
1733 | } else if (hw->mac_type == e1000_80003es2lan) { | |
1734 | tarc = er32(TARC0); | |
1735 | tarc |= 1; | |
1736 | ew32(TARC0, tarc); | |
1737 | tarc = er32(TARC1); | |
1738 | tarc |= 1; | |
1739 | ew32(TARC1, tarc); | |
1740 | } | |
1741 | ||
1742 | e1000_config_collision_dist(hw); | |
1743 | ||
1744 | /* Setup Transmit Descriptor Settings for eop descriptor */ | |
1745 | adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; | |
1746 | ||
1747 | /* only set IDE if we are delaying interrupts using the timers */ | |
1748 | if (adapter->tx_int_delay) | |
1749 | adapter->txd_cmd |= E1000_TXD_CMD_IDE; | |
1750 | ||
1751 | if (hw->mac_type < e1000_82543) | |
1752 | adapter->txd_cmd |= E1000_TXD_CMD_RPS; | |
1753 | else | |
1754 | adapter->txd_cmd |= E1000_TXD_CMD_RS; | |
1755 | ||
1756 | /* Cache if we're 82544 running in PCI-X because we'll | |
1757 | * need this to apply a workaround later in the send path. */ | |
1758 | if (hw->mac_type == e1000_82544 && | |
1759 | hw->bus_type == e1000_bus_type_pcix) | |
1760 | adapter->pcix_82544 = 1; | |
1761 | ||
1762 | ew32(TCTL, tctl); | |
1763 | ||
1764 | } | |
1765 | ||
1766 | /** | |
1767 | * e1000_setup_rx_resources - allocate Rx resources (Descriptors) | |
1768 | * @adapter: board private structure | |
1769 | * @rxdr: rx descriptor ring (for a specific queue) to setup | |
1770 | * | |
1771 | * Returns 0 on success, negative on failure | |
1772 | **/ | |
1773 | ||
1774 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, | |
1775 | struct e1000_rx_ring *rxdr) | |
1776 | { | |
1777 | struct e1000_hw *hw = &adapter->hw; | |
1778 | struct pci_dev *pdev = adapter->pdev; | |
1779 | int size, desc_len; | |
1780 | ||
1781 | size = sizeof(struct e1000_buffer) * rxdr->count; | |
1782 | rxdr->buffer_info = vmalloc(size); | |
1783 | if (!rxdr->buffer_info) { | |
1784 | DPRINTK(PROBE, ERR, | |
1785 | "Unable to allocate memory for the receive descriptor ring\n"); | |
1786 | return -ENOMEM; | |
1787 | } | |
1788 | memset(rxdr->buffer_info, 0, size); | |
1789 | ||
1790 | if (hw->mac_type <= e1000_82547_rev_2) | |
1791 | desc_len = sizeof(struct e1000_rx_desc); | |
1792 | else | |
1793 | desc_len = sizeof(union e1000_rx_desc_packet_split); | |
1794 | ||
1795 | /* Round up to nearest 4K */ | |
1796 | ||
1797 | rxdr->size = rxdr->count * desc_len; | |
1798 | rxdr->size = ALIGN(rxdr->size, 4096); | |
1799 | ||
1800 | rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); | |
1801 | ||
1802 | if (!rxdr->desc) { | |
1803 | DPRINTK(PROBE, ERR, | |
1804 | "Unable to allocate memory for the receive descriptor ring\n"); | |
1805 | setup_rx_desc_die: | |
1806 | vfree(rxdr->buffer_info); | |
1807 | return -ENOMEM; | |
1808 | } | |
1809 | ||
1810 | /* Fix for errata 23, can't cross 64kB boundary */ | |
1811 | if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { | |
1812 | void *olddesc = rxdr->desc; | |
1813 | dma_addr_t olddma = rxdr->dma; | |
1814 | DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes " | |
1815 | "at %p\n", rxdr->size, rxdr->desc); | |
1816 | /* Try again, without freeing the previous */ | |
1817 | rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); | |
1818 | /* Failed allocation, critical failure */ | |
1819 | if (!rxdr->desc) { | |
1820 | pci_free_consistent(pdev, rxdr->size, olddesc, olddma); | |
1821 | DPRINTK(PROBE, ERR, | |
1822 | "Unable to allocate memory " | |
1823 | "for the receive descriptor ring\n"); | |
1824 | goto setup_rx_desc_die; | |
1825 | } | |
1826 | ||
1827 | if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { | |
1828 | /* give up */ | |
1829 | pci_free_consistent(pdev, rxdr->size, rxdr->desc, | |
1830 | rxdr->dma); | |
1831 | pci_free_consistent(pdev, rxdr->size, olddesc, olddma); | |
1832 | DPRINTK(PROBE, ERR, | |
1833 | "Unable to allocate aligned memory " | |
1834 | "for the receive descriptor ring\n"); | |
1835 | goto setup_rx_desc_die; | |
1836 | } else { | |
1837 | /* Free old allocation, new allocation was successful */ | |
1838 | pci_free_consistent(pdev, rxdr->size, olddesc, olddma); | |
1839 | } | |
1840 | } | |
1841 | memset(rxdr->desc, 0, rxdr->size); | |
1842 | ||
1843 | rxdr->next_to_clean = 0; | |
1844 | rxdr->next_to_use = 0; | |
1845 | ||
1846 | return 0; | |
1847 | } | |
1848 | ||
1849 | /** | |
1850 | * e1000_setup_all_rx_resources - wrapper to allocate Rx resources | |
1851 | * (Descriptors) for all queues | |
1852 | * @adapter: board private structure | |
1853 | * | |
1854 | * Return 0 on success, negative on failure | |
1855 | **/ | |
1856 | ||
1857 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) | |
1858 | { | |
1859 | int i, err = 0; | |
1860 | ||
1861 | for (i = 0; i < adapter->num_rx_queues; i++) { | |
1862 | err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); | |
1863 | if (err) { | |
1864 | DPRINTK(PROBE, ERR, | |
1865 | "Allocation for Rx Queue %u failed\n", i); | |
1866 | for (i-- ; i >= 0; i--) | |
1867 | e1000_free_rx_resources(adapter, | |
1868 | &adapter->rx_ring[i]); | |
1869 | break; | |
1870 | } | |
1871 | } | |
1872 | ||
1873 | return err; | |
1874 | } | |
1875 | ||
1876 | /** | |
1877 | * e1000_setup_rctl - configure the receive control registers | |
1878 | * @adapter: Board private structure | |
1879 | **/ | |
1880 | static void e1000_setup_rctl(struct e1000_adapter *adapter) | |
1881 | { | |
1882 | struct e1000_hw *hw = &adapter->hw; | |
1883 | u32 rctl; | |
1884 | ||
1885 | rctl = er32(RCTL); | |
1886 | ||
1887 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); | |
1888 | ||
1889 | rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | | |
1890 | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | | |
1891 | (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); | |
1892 | ||
1893 | if (hw->tbi_compatibility_on == 1) | |
1894 | rctl |= E1000_RCTL_SBP; | |
1895 | else | |
1896 | rctl &= ~E1000_RCTL_SBP; | |
1897 | ||
1898 | if (adapter->netdev->mtu <= ETH_DATA_LEN) | |
1899 | rctl &= ~E1000_RCTL_LPE; | |
1900 | else | |
1901 | rctl |= E1000_RCTL_LPE; | |
1902 | ||
1903 | /* Setup buffer sizes */ | |
1904 | rctl &= ~E1000_RCTL_SZ_4096; | |
1905 | rctl |= E1000_RCTL_BSEX; | |
1906 | switch (adapter->rx_buffer_len) { | |
1907 | case E1000_RXBUFFER_256: | |
1908 | rctl |= E1000_RCTL_SZ_256; | |
1909 | rctl &= ~E1000_RCTL_BSEX; | |
1910 | break; | |
1911 | case E1000_RXBUFFER_512: | |
1912 | rctl |= E1000_RCTL_SZ_512; | |
1913 | rctl &= ~E1000_RCTL_BSEX; | |
1914 | break; | |
1915 | case E1000_RXBUFFER_1024: | |
1916 | rctl |= E1000_RCTL_SZ_1024; | |
1917 | rctl &= ~E1000_RCTL_BSEX; | |
1918 | break; | |
1919 | case E1000_RXBUFFER_2048: | |
1920 | default: | |
1921 | rctl |= E1000_RCTL_SZ_2048; | |
1922 | rctl &= ~E1000_RCTL_BSEX; | |
1923 | break; | |
1924 | case E1000_RXBUFFER_4096: | |
1925 | rctl |= E1000_RCTL_SZ_4096; | |
1926 | break; | |
1927 | case E1000_RXBUFFER_8192: | |
1928 | rctl |= E1000_RCTL_SZ_8192; | |
1929 | break; | |
1930 | case E1000_RXBUFFER_16384: | |
1931 | rctl |= E1000_RCTL_SZ_16384; | |
1932 | break; | |
1933 | } | |
1934 | ||
1935 | ew32(RCTL, rctl); | |
1936 | } | |
1937 | ||
1938 | /** | |
1939 | * e1000_configure_rx - Configure 8254x Receive Unit after Reset | |
1940 | * @adapter: board private structure | |
1941 | * | |
1942 | * Configure the Rx unit of the MAC after a reset. | |
1943 | **/ | |
1944 | ||
1945 | static void e1000_configure_rx(struct e1000_adapter *adapter) | |
1946 | { | |
1947 | u64 rdba; | |
1948 | struct e1000_hw *hw = &adapter->hw; | |
1949 | u32 rdlen, rctl, rxcsum, ctrl_ext; | |
1950 | ||
1951 | rdlen = adapter->rx_ring[0].count * | |
1952 | sizeof(struct e1000_rx_desc); | |
1953 | adapter->clean_rx = e1000_clean_rx_irq; | |
1954 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers; | |
1955 | ||
1956 | /* disable receives while setting up the descriptors */ | |
1957 | rctl = er32(RCTL); | |
1958 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | |
1959 | ||
1960 | /* set the Receive Delay Timer Register */ | |
1961 | ew32(RDTR, adapter->rx_int_delay); | |
1962 | ||
1963 | if (hw->mac_type >= e1000_82540) { | |
1964 | ew32(RADV, adapter->rx_abs_int_delay); | |
1965 | if (adapter->itr_setting != 0) | |
1966 | ew32(ITR, 1000000000 / (adapter->itr * 256)); | |
1967 | } | |
1968 | ||
1969 | if (hw->mac_type >= e1000_82571) { | |
1970 | ctrl_ext = er32(CTRL_EXT); | |
1971 | /* Reset delay timers after every interrupt */ | |
1972 | ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR; | |
1973 | /* Auto-Mask interrupts upon ICR access */ | |
1974 | ctrl_ext |= E1000_CTRL_EXT_IAME; | |
1975 | ew32(IAM, 0xffffffff); | |
1976 | ew32(CTRL_EXT, ctrl_ext); | |
1977 | E1000_WRITE_FLUSH(); | |
1978 | } | |
1979 | ||
1980 | /* Setup the HW Rx Head and Tail Descriptor Pointers and | |
1981 | * the Base and Length of the Rx Descriptor Ring */ | |
1982 | switch (adapter->num_rx_queues) { | |
1983 | case 1: | |
1984 | default: | |
1985 | rdba = adapter->rx_ring[0].dma; | |
1986 | ew32(RDLEN, rdlen); | |
1987 | ew32(RDBAH, (rdba >> 32)); | |
1988 | ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); | |
1989 | ew32(RDT, 0); | |
1990 | ew32(RDH, 0); | |
1991 | adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH); | |
1992 | adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT); | |
1993 | break; | |
1994 | } | |
1995 | ||
1996 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ | |
1997 | if (hw->mac_type >= e1000_82543) { | |
1998 | rxcsum = er32(RXCSUM); | |
1999 | if (adapter->rx_csum) | |
2000 | rxcsum |= E1000_RXCSUM_TUOFL; | |
2001 | else | |
2002 | /* don't need to clear IPPCSE as it defaults to 0 */ | |
2003 | rxcsum &= ~E1000_RXCSUM_TUOFL; | |
2004 | ew32(RXCSUM, rxcsum); | |
2005 | } | |
2006 | ||
2007 | /* Enable Receives */ | |
2008 | ew32(RCTL, rctl); | |
2009 | } | |
2010 | ||
2011 | /** | |
2012 | * e1000_free_tx_resources - Free Tx Resources per Queue | |
2013 | * @adapter: board private structure | |
2014 | * @tx_ring: Tx descriptor ring for a specific queue | |
2015 | * | |
2016 | * Free all transmit software resources | |
2017 | **/ | |
2018 | ||
2019 | static void e1000_free_tx_resources(struct e1000_adapter *adapter, | |
2020 | struct e1000_tx_ring *tx_ring) | |
2021 | { | |
2022 | struct pci_dev *pdev = adapter->pdev; | |
2023 | ||
2024 | e1000_clean_tx_ring(adapter, tx_ring); | |
2025 | ||
2026 | vfree(tx_ring->buffer_info); | |
2027 | tx_ring->buffer_info = NULL; | |
2028 | ||
2029 | pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); | |
2030 | ||
2031 | tx_ring->desc = NULL; | |
2032 | } | |
2033 | ||
2034 | /** | |
2035 | * e1000_free_all_tx_resources - Free Tx Resources for All Queues | |
2036 | * @adapter: board private structure | |
2037 | * | |
2038 | * Free all transmit software resources | |
2039 | **/ | |
2040 | ||
2041 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter) | |
2042 | { | |
2043 | int i; | |
2044 | ||
2045 | for (i = 0; i < adapter->num_tx_queues; i++) | |
2046 | e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); | |
2047 | } | |
2048 | ||
2049 | static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, | |
2050 | struct e1000_buffer *buffer_info) | |
2051 | { | |
2052 | if (buffer_info->dma) { | |
2053 | pci_unmap_page(adapter->pdev, | |
2054 | buffer_info->dma, | |
2055 | buffer_info->length, | |
2056 | PCI_DMA_TODEVICE); | |
2057 | buffer_info->dma = 0; | |
2058 | } | |
2059 | if (buffer_info->skb) { | |
2060 | dev_kfree_skb_any(buffer_info->skb); | |
2061 | buffer_info->skb = NULL; | |
2062 | } | |
2063 | /* buffer_info must be completely set up in the transmit path */ | |
2064 | } | |
2065 | ||
2066 | /** | |
2067 | * e1000_clean_tx_ring - Free Tx Buffers | |
2068 | * @adapter: board private structure | |
2069 | * @tx_ring: ring to be cleaned | |
2070 | **/ | |
2071 | ||
2072 | static void e1000_clean_tx_ring(struct e1000_adapter *adapter, | |
2073 | struct e1000_tx_ring *tx_ring) | |
2074 | { | |
2075 | struct e1000_hw *hw = &adapter->hw; | |
2076 | struct e1000_buffer *buffer_info; | |
2077 | unsigned long size; | |
2078 | unsigned int i; | |
2079 | ||
2080 | /* Free all the Tx ring sk_buffs */ | |
2081 | ||
2082 | for (i = 0; i < tx_ring->count; i++) { | |
2083 | buffer_info = &tx_ring->buffer_info[i]; | |
2084 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | |
2085 | } | |
2086 | ||
2087 | size = sizeof(struct e1000_buffer) * tx_ring->count; | |
2088 | memset(tx_ring->buffer_info, 0, size); | |
2089 | ||
2090 | /* Zero out the descriptor ring */ | |
2091 | ||
2092 | memset(tx_ring->desc, 0, tx_ring->size); | |
2093 | ||
2094 | tx_ring->next_to_use = 0; | |
2095 | tx_ring->next_to_clean = 0; | |
2096 | tx_ring->last_tx_tso = 0; | |
2097 | ||
2098 | writel(0, hw->hw_addr + tx_ring->tdh); | |
2099 | writel(0, hw->hw_addr + tx_ring->tdt); | |
2100 | } | |
2101 | ||
2102 | /** | |
2103 | * e1000_clean_all_tx_rings - Free Tx Buffers for all queues | |
2104 | * @adapter: board private structure | |
2105 | **/ | |
2106 | ||
2107 | static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) | |
2108 | { | |
2109 | int i; | |
2110 | ||
2111 | for (i = 0; i < adapter->num_tx_queues; i++) | |
2112 | e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); | |
2113 | } | |
2114 | ||
2115 | /** | |
2116 | * e1000_free_rx_resources - Free Rx Resources | |
2117 | * @adapter: board private structure | |
2118 | * @rx_ring: ring to clean the resources from | |
2119 | * | |
2120 | * Free all receive software resources | |
2121 | **/ | |
2122 | ||
2123 | static void e1000_free_rx_resources(struct e1000_adapter *adapter, | |
2124 | struct e1000_rx_ring *rx_ring) | |
2125 | { | |
2126 | struct pci_dev *pdev = adapter->pdev; | |
2127 | ||
2128 | e1000_clean_rx_ring(adapter, rx_ring); | |
2129 | ||
2130 | vfree(rx_ring->buffer_info); | |
2131 | rx_ring->buffer_info = NULL; | |
2132 | ||
2133 | pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); | |
2134 | ||
2135 | rx_ring->desc = NULL; | |
2136 | } | |
2137 | ||
2138 | /** | |
2139 | * e1000_free_all_rx_resources - Free Rx Resources for All Queues | |
2140 | * @adapter: board private structure | |
2141 | * | |
2142 | * Free all receive software resources | |
2143 | **/ | |
2144 | ||
2145 | void e1000_free_all_rx_resources(struct e1000_adapter *adapter) | |
2146 | { | |
2147 | int i; | |
2148 | ||
2149 | for (i = 0; i < adapter->num_rx_queues; i++) | |
2150 | e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); | |
2151 | } | |
2152 | ||
2153 | /** | |
2154 | * e1000_clean_rx_ring - Free Rx Buffers per Queue | |
2155 | * @adapter: board private structure | |
2156 | * @rx_ring: ring to free buffers from | |
2157 | **/ | |
2158 | ||
2159 | static void e1000_clean_rx_ring(struct e1000_adapter *adapter, | |
2160 | struct e1000_rx_ring *rx_ring) | |
2161 | { | |
2162 | struct e1000_hw *hw = &adapter->hw; | |
2163 | struct e1000_buffer *buffer_info; | |
2164 | struct pci_dev *pdev = adapter->pdev; | |
2165 | unsigned long size; | |
2166 | unsigned int i; | |
2167 | ||
2168 | /* Free all the Rx ring sk_buffs */ | |
2169 | for (i = 0; i < rx_ring->count; i++) { | |
2170 | buffer_info = &rx_ring->buffer_info[i]; | |
2171 | if (buffer_info->skb) { | |
2172 | pci_unmap_single(pdev, | |
2173 | buffer_info->dma, | |
2174 | buffer_info->length, | |
2175 | PCI_DMA_FROMDEVICE); | |
2176 | ||
2177 | dev_kfree_skb(buffer_info->skb); | |
2178 | buffer_info->skb = NULL; | |
2179 | } | |
2180 | } | |
2181 | ||
2182 | size = sizeof(struct e1000_buffer) * rx_ring->count; | |
2183 | memset(rx_ring->buffer_info, 0, size); | |
2184 | ||
2185 | /* Zero out the descriptor ring */ | |
2186 | ||
2187 | memset(rx_ring->desc, 0, rx_ring->size); | |
2188 | ||
2189 | rx_ring->next_to_clean = 0; | |
2190 | rx_ring->next_to_use = 0; | |
2191 | ||
2192 | writel(0, hw->hw_addr + rx_ring->rdh); | |
2193 | writel(0, hw->hw_addr + rx_ring->rdt); | |
2194 | } | |
2195 | ||
2196 | /** | |
2197 | * e1000_clean_all_rx_rings - Free Rx Buffers for all queues | |
2198 | * @adapter: board private structure | |
2199 | **/ | |
2200 | ||
2201 | static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) | |
2202 | { | |
2203 | int i; | |
2204 | ||
2205 | for (i = 0; i < adapter->num_rx_queues; i++) | |
2206 | e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); | |
2207 | } | |
2208 | ||
2209 | /* The 82542 2.0 (revision 2) needs to have the receive unit in reset | |
2210 | * and memory write and invalidate disabled for certain operations | |
2211 | */ | |
2212 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter) | |
2213 | { | |
2214 | struct e1000_hw *hw = &adapter->hw; | |
2215 | struct net_device *netdev = adapter->netdev; | |
2216 | u32 rctl; | |
2217 | ||
2218 | e1000_pci_clear_mwi(hw); | |
2219 | ||
2220 | rctl = er32(RCTL); | |
2221 | rctl |= E1000_RCTL_RST; | |
2222 | ew32(RCTL, rctl); | |
2223 | E1000_WRITE_FLUSH(); | |
2224 | mdelay(5); | |
2225 | ||
2226 | if (netif_running(netdev)) | |
2227 | e1000_clean_all_rx_rings(adapter); | |
2228 | } | |
2229 | ||
2230 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter) | |
2231 | { | |
2232 | struct e1000_hw *hw = &adapter->hw; | |
2233 | struct net_device *netdev = adapter->netdev; | |
2234 | u32 rctl; | |
2235 | ||
2236 | rctl = er32(RCTL); | |
2237 | rctl &= ~E1000_RCTL_RST; | |
2238 | ew32(RCTL, rctl); | |
2239 | E1000_WRITE_FLUSH(); | |
2240 | mdelay(5); | |
2241 | ||
2242 | if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) | |
2243 | e1000_pci_set_mwi(hw); | |
2244 | ||
2245 | if (netif_running(netdev)) { | |
2246 | /* No need to loop, because 82542 supports only 1 queue */ | |
2247 | struct e1000_rx_ring *ring = &adapter->rx_ring[0]; | |
2248 | e1000_configure_rx(adapter); | |
2249 | adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); | |
2250 | } | |
2251 | } | |
2252 | ||
2253 | /** | |
2254 | * e1000_set_mac - Change the Ethernet Address of the NIC | |
2255 | * @netdev: network interface device structure | |
2256 | * @p: pointer to an address structure | |
2257 | * | |
2258 | * Returns 0 on success, negative on failure | |
2259 | **/ | |
2260 | ||
2261 | static int e1000_set_mac(struct net_device *netdev, void *p) | |
2262 | { | |
2263 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
2264 | struct e1000_hw *hw = &adapter->hw; | |
2265 | struct sockaddr *addr = p; | |
2266 | ||
2267 | if (!is_valid_ether_addr(addr->sa_data)) | |
2268 | return -EADDRNOTAVAIL; | |
2269 | ||
2270 | /* 82542 2.0 needs to be in reset to write receive address registers */ | |
2271 | ||
2272 | if (hw->mac_type == e1000_82542_rev2_0) | |
2273 | e1000_enter_82542_rst(adapter); | |
2274 | ||
2275 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | |
2276 | memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); | |
2277 | ||
2278 | e1000_rar_set(hw, hw->mac_addr, 0); | |
2279 | ||
2280 | /* With 82571 controllers, LAA may be overwritten (with the default) | |
2281 | * due to controller reset from the other port. */ | |
2282 | if (hw->mac_type == e1000_82571) { | |
2283 | /* activate the work around */ | |
2284 | hw->laa_is_present = 1; | |
2285 | ||
2286 | /* Hold a copy of the LAA in RAR[14] This is done so that | |
2287 | * between the time RAR[0] gets clobbered and the time it | |
2288 | * gets fixed (in e1000_watchdog), the actual LAA is in one | |
2289 | * of the RARs and no incoming packets directed to this port | |
2290 | * are dropped. Eventaully the LAA will be in RAR[0] and | |
2291 | * RAR[14] */ | |
2292 | e1000_rar_set(hw, hw->mac_addr, | |
2293 | E1000_RAR_ENTRIES - 1); | |
2294 | } | |
2295 | ||
2296 | if (hw->mac_type == e1000_82542_rev2_0) | |
2297 | e1000_leave_82542_rst(adapter); | |
2298 | ||
2299 | return 0; | |
2300 | } | |
2301 | ||
2302 | /** | |
2303 | * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set | |
2304 | * @netdev: network interface device structure | |
2305 | * | |
2306 | * The set_rx_mode entry point is called whenever the unicast or multicast | |
2307 | * address lists or the network interface flags are updated. This routine is | |
2308 | * responsible for configuring the hardware for proper unicast, multicast, | |
2309 | * promiscuous mode, and all-multi behavior. | |
2310 | **/ | |
2311 | ||
2312 | static void e1000_set_rx_mode(struct net_device *netdev) | |
2313 | { | |
2314 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
2315 | struct e1000_hw *hw = &adapter->hw; | |
2316 | struct dev_addr_list *uc_ptr; | |
2317 | struct dev_addr_list *mc_ptr; | |
2318 | u32 rctl; | |
2319 | u32 hash_value; | |
2320 | int i, rar_entries = E1000_RAR_ENTRIES; | |
2321 | int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? | |
2322 | E1000_NUM_MTA_REGISTERS_ICH8LAN : | |
2323 | E1000_NUM_MTA_REGISTERS; | |
2324 | ||
2325 | if (hw->mac_type == e1000_ich8lan) | |
2326 | rar_entries = E1000_RAR_ENTRIES_ICH8LAN; | |
2327 | ||
2328 | /* reserve RAR[14] for LAA over-write work-around */ | |
2329 | if (hw->mac_type == e1000_82571) | |
2330 | rar_entries--; | |
2331 | ||
2332 | /* Check for Promiscuous and All Multicast modes */ | |
2333 | ||
2334 | rctl = er32(RCTL); | |
2335 | ||
2336 | if (netdev->flags & IFF_PROMISC) { | |
2337 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); | |
2338 | rctl &= ~E1000_RCTL_VFE; | |
2339 | } else { | |
2340 | if (netdev->flags & IFF_ALLMULTI) { | |
2341 | rctl |= E1000_RCTL_MPE; | |
2342 | } else { | |
2343 | rctl &= ~E1000_RCTL_MPE; | |
2344 | } | |
2345 | if (adapter->hw.mac_type != e1000_ich8lan) | |
2346 | rctl |= E1000_RCTL_VFE; | |
2347 | } | |
2348 | ||
2349 | uc_ptr = NULL; | |
2350 | if (netdev->uc_count > rar_entries - 1) { | |
2351 | rctl |= E1000_RCTL_UPE; | |
2352 | } else if (!(netdev->flags & IFF_PROMISC)) { | |
2353 | rctl &= ~E1000_RCTL_UPE; | |
2354 | uc_ptr = netdev->uc_list; | |
2355 | } | |
2356 | ||
2357 | ew32(RCTL, rctl); | |
2358 | ||
2359 | /* 82542 2.0 needs to be in reset to write receive address registers */ | |
2360 | ||
2361 | if (hw->mac_type == e1000_82542_rev2_0) | |
2362 | e1000_enter_82542_rst(adapter); | |
2363 | ||
2364 | /* load the first 14 addresses into the exact filters 1-14. Unicast | |
2365 | * addresses take precedence to avoid disabling unicast filtering | |
2366 | * when possible. | |
2367 | * | |
2368 | * RAR 0 is used for the station MAC adddress | |
2369 | * if there are not 14 addresses, go ahead and clear the filters | |
2370 | * -- with 82571 controllers only 0-13 entries are filled here | |
2371 | */ | |
2372 | mc_ptr = netdev->mc_list; | |
2373 | ||
2374 | for (i = 1; i < rar_entries; i++) { | |
2375 | if (uc_ptr) { | |
2376 | e1000_rar_set(hw, uc_ptr->da_addr, i); | |
2377 | uc_ptr = uc_ptr->next; | |
2378 | } else if (mc_ptr) { | |
2379 | e1000_rar_set(hw, mc_ptr->da_addr, i); | |
2380 | mc_ptr = mc_ptr->next; | |
2381 | } else { | |
2382 | E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); | |
2383 | E1000_WRITE_FLUSH(); | |
2384 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); | |
2385 | E1000_WRITE_FLUSH(); | |
2386 | } | |
2387 | } | |
2388 | WARN_ON(uc_ptr != NULL); | |
2389 | ||
2390 | /* clear the old settings from the multicast hash table */ | |
2391 | ||
2392 | for (i = 0; i < mta_reg_count; i++) { | |
2393 | E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); | |
2394 | E1000_WRITE_FLUSH(); | |
2395 | } | |
2396 | ||
2397 | /* load any remaining addresses into the hash table */ | |
2398 | ||
2399 | for (; mc_ptr; mc_ptr = mc_ptr->next) { | |
2400 | hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr); | |
2401 | e1000_mta_set(hw, hash_value); | |
2402 | } | |
2403 | ||
2404 | if (hw->mac_type == e1000_82542_rev2_0) | |
2405 | e1000_leave_82542_rst(adapter); | |
2406 | } | |
2407 | ||
2408 | /* Need to wait a few seconds after link up to get diagnostic information from | |
2409 | * the phy */ | |
2410 | ||
2411 | static void e1000_update_phy_info(unsigned long data) | |
2412 | { | |
2413 | struct e1000_adapter *adapter = (struct e1000_adapter *)data; | |
2414 | struct e1000_hw *hw = &adapter->hw; | |
2415 | e1000_phy_get_info(hw, &adapter->phy_info); | |
2416 | } | |
2417 | ||
2418 | /** | |
2419 | * e1000_82547_tx_fifo_stall - Timer Call-back | |
2420 | * @data: pointer to adapter cast into an unsigned long | |
2421 | **/ | |
2422 | ||
2423 | static void e1000_82547_tx_fifo_stall(unsigned long data) | |
2424 | { | |
2425 | struct e1000_adapter *adapter = (struct e1000_adapter *)data; | |
2426 | struct e1000_hw *hw = &adapter->hw; | |
2427 | struct net_device *netdev = adapter->netdev; | |
2428 | u32 tctl; | |
2429 | ||
2430 | if (atomic_read(&adapter->tx_fifo_stall)) { | |
2431 | if ((er32(TDT) == er32(TDH)) && | |
2432 | (er32(TDFT) == er32(TDFH)) && | |
2433 | (er32(TDFTS) == er32(TDFHS))) { | |
2434 | tctl = er32(TCTL); | |
2435 | ew32(TCTL, tctl & ~E1000_TCTL_EN); | |
2436 | ew32(TDFT, adapter->tx_head_addr); | |
2437 | ew32(TDFH, adapter->tx_head_addr); | |
2438 | ew32(TDFTS, adapter->tx_head_addr); | |
2439 | ew32(TDFHS, adapter->tx_head_addr); | |
2440 | ew32(TCTL, tctl); | |
2441 | E1000_WRITE_FLUSH(); | |
2442 | ||
2443 | adapter->tx_fifo_head = 0; | |
2444 | atomic_set(&adapter->tx_fifo_stall, 0); | |
2445 | netif_wake_queue(netdev); | |
2446 | } else { | |
2447 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); | |
2448 | } | |
2449 | } | |
2450 | } | |
2451 | ||
2452 | /** | |
2453 | * e1000_watchdog - Timer Call-back | |
2454 | * @data: pointer to adapter cast into an unsigned long | |
2455 | **/ | |
2456 | static void e1000_watchdog(unsigned long data) | |
2457 | { | |
2458 | struct e1000_adapter *adapter = (struct e1000_adapter *)data; | |
2459 | struct e1000_hw *hw = &adapter->hw; | |
2460 | struct net_device *netdev = adapter->netdev; | |
2461 | struct e1000_tx_ring *txdr = adapter->tx_ring; | |
2462 | u32 link, tctl; | |
2463 | s32 ret_val; | |
2464 | ||
2465 | ret_val = e1000_check_for_link(hw); | |
2466 | if ((ret_val == E1000_ERR_PHY) && | |
2467 | (hw->phy_type == e1000_phy_igp_3) && | |
2468 | (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { | |
2469 | /* See e1000_kumeran_lock_loss_workaround() */ | |
2470 | DPRINTK(LINK, INFO, | |
2471 | "Gigabit has been disabled, downgrading speed\n"); | |
2472 | } | |
2473 | ||
2474 | if (hw->mac_type == e1000_82573) { | |
2475 | e1000_enable_tx_pkt_filtering(hw); | |
2476 | if (adapter->mng_vlan_id != hw->mng_cookie.vlan_id) | |
2477 | e1000_update_mng_vlan(adapter); | |
2478 | } | |
2479 | ||
2480 | if ((hw->media_type == e1000_media_type_internal_serdes) && | |
2481 | !(er32(TXCW) & E1000_TXCW_ANE)) | |
2482 | link = !hw->serdes_link_down; | |
2483 | else | |
2484 | link = er32(STATUS) & E1000_STATUS_LU; | |
2485 | ||
2486 | if (link) { | |
2487 | if (!netif_carrier_ok(netdev)) { | |
2488 | u32 ctrl; | |
2489 | bool txb2b = true; | |
2490 | e1000_get_speed_and_duplex(hw, | |
2491 | &adapter->link_speed, | |
2492 | &adapter->link_duplex); | |
2493 | ||
2494 | ctrl = er32(CTRL); | |
2495 | printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, " | |
2496 | "Flow Control: %s\n", | |
2497 | netdev->name, | |
2498 | adapter->link_speed, | |
2499 | adapter->link_duplex == FULL_DUPLEX ? | |
2500 | "Full Duplex" : "Half Duplex", | |
2501 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & | |
2502 | E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & | |
2503 | E1000_CTRL_RFCE) ? "RX" : ((ctrl & | |
2504 | E1000_CTRL_TFCE) ? "TX" : "None" ))); | |
2505 | ||
2506 | /* tweak tx_queue_len according to speed/duplex | |
2507 | * and adjust the timeout factor */ | |
2508 | netdev->tx_queue_len = adapter->tx_queue_len; | |
2509 | adapter->tx_timeout_factor = 1; | |
2510 | switch (adapter->link_speed) { | |
2511 | case SPEED_10: | |
2512 | txb2b = false; | |
2513 | netdev->tx_queue_len = 10; | |
2514 | adapter->tx_timeout_factor = 8; | |
2515 | break; | |
2516 | case SPEED_100: | |
2517 | txb2b = false; | |
2518 | netdev->tx_queue_len = 100; | |
2519 | /* maybe add some timeout factor ? */ | |
2520 | break; | |
2521 | } | |
2522 | ||
2523 | if ((hw->mac_type == e1000_82571 || | |
2524 | hw->mac_type == e1000_82572) && | |
2525 | !txb2b) { | |
2526 | u32 tarc0; | |
2527 | tarc0 = er32(TARC0); | |
2528 | tarc0 &= ~(1 << 21); | |
2529 | ew32(TARC0, tarc0); | |
2530 | } | |
2531 | ||
2532 | /* disable TSO for pcie and 10/100 speeds, to avoid | |
2533 | * some hardware issues */ | |
2534 | if (!adapter->tso_force && | |
2535 | hw->bus_type == e1000_bus_type_pci_express){ | |
2536 | switch (adapter->link_speed) { | |
2537 | case SPEED_10: | |
2538 | case SPEED_100: | |
2539 | DPRINTK(PROBE,INFO, | |
2540 | "10/100 speed: disabling TSO\n"); | |
2541 | netdev->features &= ~NETIF_F_TSO; | |
2542 | netdev->features &= ~NETIF_F_TSO6; | |
2543 | break; | |
2544 | case SPEED_1000: | |
2545 | netdev->features |= NETIF_F_TSO; | |
2546 | netdev->features |= NETIF_F_TSO6; | |
2547 | break; | |
2548 | default: | |
2549 | /* oops */ | |
2550 | break; | |
2551 | } | |
2552 | } | |
2553 | ||
2554 | /* enable transmits in the hardware, need to do this | |
2555 | * after setting TARC0 */ | |
2556 | tctl = er32(TCTL); | |
2557 | tctl |= E1000_TCTL_EN; | |
2558 | ew32(TCTL, tctl); | |
2559 | ||
2560 | netif_carrier_on(netdev); | |
2561 | netif_wake_queue(netdev); | |
2562 | mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); | |
2563 | adapter->smartspeed = 0; | |
2564 | } else { | |
2565 | /* make sure the receive unit is started */ | |
2566 | if (hw->rx_needs_kicking) { | |
2567 | u32 rctl = er32(RCTL); | |
2568 | ew32(RCTL, rctl | E1000_RCTL_EN); | |
2569 | } | |
2570 | } | |
2571 | } else { | |
2572 | if (netif_carrier_ok(netdev)) { | |
2573 | adapter->link_speed = 0; | |
2574 | adapter->link_duplex = 0; | |
2575 | printk(KERN_INFO "e1000: %s NIC Link is Down\n", | |
2576 | netdev->name); | |
2577 | netif_carrier_off(netdev); | |
2578 | netif_stop_queue(netdev); | |
2579 | mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); | |
2580 | ||
2581 | /* 80003ES2LAN workaround-- | |
2582 | * For packet buffer work-around on link down event; | |
2583 | * disable receives in the ISR and | |
2584 | * reset device here in the watchdog | |
2585 | */ | |
2586 | if (hw->mac_type == e1000_80003es2lan) | |
2587 | /* reset device */ | |
2588 | schedule_work(&adapter->reset_task); | |
2589 | } | |
2590 | ||
2591 | e1000_smartspeed(adapter); | |
2592 | } | |
2593 | ||
2594 | e1000_update_stats(adapter); | |
2595 | ||
2596 | hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; | |
2597 | adapter->tpt_old = adapter->stats.tpt; | |
2598 | hw->collision_delta = adapter->stats.colc - adapter->colc_old; | |
2599 | adapter->colc_old = adapter->stats.colc; | |
2600 | ||
2601 | adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; | |
2602 | adapter->gorcl_old = adapter->stats.gorcl; | |
2603 | adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; | |
2604 | adapter->gotcl_old = adapter->stats.gotcl; | |
2605 | ||
2606 | e1000_update_adaptive(hw); | |
2607 | ||
2608 | if (!netif_carrier_ok(netdev)) { | |
2609 | if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { | |
2610 | /* We've lost link, so the controller stops DMA, | |
2611 | * but we've got queued Tx work that's never going | |
2612 | * to get done, so reset controller to flush Tx. | |
2613 | * (Do the reset outside of interrupt context). */ | |
2614 | adapter->tx_timeout_count++; | |
2615 | schedule_work(&adapter->reset_task); | |
2616 | } | |
2617 | } | |
2618 | ||
2619 | /* Cause software interrupt to ensure rx ring is cleaned */ | |
2620 | ew32(ICS, E1000_ICS_RXDMT0); | |
2621 | ||
2622 | /* Force detection of hung controller every watchdog period */ | |
2623 | adapter->detect_tx_hung = true; | |
2624 | ||
2625 | /* With 82571 controllers, LAA may be overwritten due to controller | |
2626 | * reset from the other port. Set the appropriate LAA in RAR[0] */ | |
2627 | if (hw->mac_type == e1000_82571 && hw->laa_is_present) | |
2628 | e1000_rar_set(hw, hw->mac_addr, 0); | |
2629 | ||
2630 | /* Reset the timer */ | |
2631 | mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ)); | |
2632 | } | |
2633 | ||
2634 | enum latency_range { | |
2635 | lowest_latency = 0, | |
2636 | low_latency = 1, | |
2637 | bulk_latency = 2, | |
2638 | latency_invalid = 255 | |
2639 | }; | |
2640 | ||
2641 | /** | |
2642 | * e1000_update_itr - update the dynamic ITR value based on statistics | |
2643 | * Stores a new ITR value based on packets and byte | |
2644 | * counts during the last interrupt. The advantage of per interrupt | |
2645 | * computation is faster updates and more accurate ITR for the current | |
2646 | * traffic pattern. Constants in this function were computed | |
2647 | * based on theoretical maximum wire speed and thresholds were set based | |
2648 | * on testing data as well as attempting to minimize response time | |
2649 | * while increasing bulk throughput. | |
2650 | * this functionality is controlled by the InterruptThrottleRate module | |
2651 | * parameter (see e1000_param.c) | |
2652 | * @adapter: pointer to adapter | |
2653 | * @itr_setting: current adapter->itr | |
2654 | * @packets: the number of packets during this measurement interval | |
2655 | * @bytes: the number of bytes during this measurement interval | |
2656 | **/ | |
2657 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, | |
2658 | u16 itr_setting, int packets, int bytes) | |
2659 | { | |
2660 | unsigned int retval = itr_setting; | |
2661 | struct e1000_hw *hw = &adapter->hw; | |
2662 | ||
2663 | if (unlikely(hw->mac_type < e1000_82540)) | |
2664 | goto update_itr_done; | |
2665 | ||
2666 | if (packets == 0) | |
2667 | goto update_itr_done; | |
2668 | ||
2669 | switch (itr_setting) { | |
2670 | case lowest_latency: | |
2671 | /* jumbo frames get bulk treatment*/ | |
2672 | if (bytes/packets > 8000) | |
2673 | retval = bulk_latency; | |
2674 | else if ((packets < 5) && (bytes > 512)) | |
2675 | retval = low_latency; | |
2676 | break; | |
2677 | case low_latency: /* 50 usec aka 20000 ints/s */ | |
2678 | if (bytes > 10000) { | |
2679 | /* jumbo frames need bulk latency setting */ | |
2680 | if (bytes/packets > 8000) | |
2681 | retval = bulk_latency; | |
2682 | else if ((packets < 10) || ((bytes/packets) > 1200)) | |
2683 | retval = bulk_latency; | |
2684 | else if ((packets > 35)) | |
2685 | retval = lowest_latency; | |
2686 | } else if (bytes/packets > 2000) | |
2687 | retval = bulk_latency; | |
2688 | else if (packets <= 2 && bytes < 512) | |
2689 | retval = lowest_latency; | |
2690 | break; | |
2691 | case bulk_latency: /* 250 usec aka 4000 ints/s */ | |
2692 | if (bytes > 25000) { | |
2693 | if (packets > 35) | |
2694 | retval = low_latency; | |
2695 | } else if (bytes < 6000) { | |
2696 | retval = low_latency; | |
2697 | } | |
2698 | break; | |
2699 | } | |
2700 | ||
2701 | update_itr_done: | |
2702 | return retval; | |
2703 | } | |
2704 | ||
2705 | static void e1000_set_itr(struct e1000_adapter *adapter) | |
2706 | { | |
2707 | struct e1000_hw *hw = &adapter->hw; | |
2708 | u16 current_itr; | |
2709 | u32 new_itr = adapter->itr; | |
2710 | ||
2711 | if (unlikely(hw->mac_type < e1000_82540)) | |
2712 | return; | |
2713 | ||
2714 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ | |
2715 | if (unlikely(adapter->link_speed != SPEED_1000)) { | |
2716 | current_itr = 0; | |
2717 | new_itr = 4000; | |
2718 | goto set_itr_now; | |
2719 | } | |
2720 | ||
2721 | adapter->tx_itr = e1000_update_itr(adapter, | |
2722 | adapter->tx_itr, | |
2723 | adapter->total_tx_packets, | |
2724 | adapter->total_tx_bytes); | |
2725 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | |
2726 | if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) | |
2727 | adapter->tx_itr = low_latency; | |
2728 | ||
2729 | adapter->rx_itr = e1000_update_itr(adapter, | |
2730 | adapter->rx_itr, | |
2731 | adapter->total_rx_packets, | |
2732 | adapter->total_rx_bytes); | |
2733 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ | |
2734 | if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) | |
2735 | adapter->rx_itr = low_latency; | |
2736 | ||
2737 | current_itr = max(adapter->rx_itr, adapter->tx_itr); | |
2738 | ||
2739 | switch (current_itr) { | |
2740 | /* counts and packets in update_itr are dependent on these numbers */ | |
2741 | case lowest_latency: | |
2742 | new_itr = 70000; | |
2743 | break; | |
2744 | case low_latency: | |
2745 | new_itr = 20000; /* aka hwitr = ~200 */ | |
2746 | break; | |
2747 | case bulk_latency: | |
2748 | new_itr = 4000; | |
2749 | break; | |
2750 | default: | |
2751 | break; | |
2752 | } | |
2753 | ||
2754 | set_itr_now: | |
2755 | if (new_itr != adapter->itr) { | |
2756 | /* this attempts to bias the interrupt rate towards Bulk | |
2757 | * by adding intermediate steps when interrupt rate is | |
2758 | * increasing */ | |
2759 | new_itr = new_itr > adapter->itr ? | |
2760 | min(adapter->itr + (new_itr >> 2), new_itr) : | |
2761 | new_itr; | |
2762 | adapter->itr = new_itr; | |
2763 | ew32(ITR, 1000000000 / (new_itr * 256)); | |
2764 | } | |
2765 | ||
2766 | return; | |
2767 | } | |
2768 | ||
2769 | #define E1000_TX_FLAGS_CSUM 0x00000001 | |
2770 | #define E1000_TX_FLAGS_VLAN 0x00000002 | |
2771 | #define E1000_TX_FLAGS_TSO 0x00000004 | |
2772 | #define E1000_TX_FLAGS_IPV4 0x00000008 | |
2773 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 | |
2774 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 | |
2775 | ||
2776 | static int e1000_tso(struct e1000_adapter *adapter, | |
2777 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb) | |
2778 | { | |
2779 | struct e1000_context_desc *context_desc; | |
2780 | struct e1000_buffer *buffer_info; | |
2781 | unsigned int i; | |
2782 | u32 cmd_length = 0; | |
2783 | u16 ipcse = 0, tucse, mss; | |
2784 | u8 ipcss, ipcso, tucss, tucso, hdr_len; | |
2785 | int err; | |
2786 | ||
2787 | if (skb_is_gso(skb)) { | |
2788 | if (skb_header_cloned(skb)) { | |
2789 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | |
2790 | if (err) | |
2791 | return err; | |
2792 | } | |
2793 | ||
2794 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
2795 | mss = skb_shinfo(skb)->gso_size; | |
2796 | if (skb->protocol == htons(ETH_P_IP)) { | |
2797 | struct iphdr *iph = ip_hdr(skb); | |
2798 | iph->tot_len = 0; | |
2799 | iph->check = 0; | |
2800 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | |
2801 | iph->daddr, 0, | |
2802 | IPPROTO_TCP, | |
2803 | 0); | |
2804 | cmd_length = E1000_TXD_CMD_IP; | |
2805 | ipcse = skb_transport_offset(skb) - 1; | |
2806 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | |
2807 | ipv6_hdr(skb)->payload_len = 0; | |
2808 | tcp_hdr(skb)->check = | |
2809 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | |
2810 | &ipv6_hdr(skb)->daddr, | |
2811 | 0, IPPROTO_TCP, 0); | |
2812 | ipcse = 0; | |
2813 | } | |
2814 | ipcss = skb_network_offset(skb); | |
2815 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; | |
2816 | tucss = skb_transport_offset(skb); | |
2817 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; | |
2818 | tucse = 0; | |
2819 | ||
2820 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | | |
2821 | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); | |
2822 | ||
2823 | i = tx_ring->next_to_use; | |
2824 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); | |
2825 | buffer_info = &tx_ring->buffer_info[i]; | |
2826 | ||
2827 | context_desc->lower_setup.ip_fields.ipcss = ipcss; | |
2828 | context_desc->lower_setup.ip_fields.ipcso = ipcso; | |
2829 | context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); | |
2830 | context_desc->upper_setup.tcp_fields.tucss = tucss; | |
2831 | context_desc->upper_setup.tcp_fields.tucso = tucso; | |
2832 | context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); | |
2833 | context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); | |
2834 | context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; | |
2835 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); | |
2836 | ||
2837 | buffer_info->time_stamp = jiffies; | |
2838 | buffer_info->next_to_watch = i; | |
2839 | ||
2840 | if (++i == tx_ring->count) i = 0; | |
2841 | tx_ring->next_to_use = i; | |
2842 | ||
2843 | return true; | |
2844 | } | |
2845 | return false; | |
2846 | } | |
2847 | ||
2848 | static bool e1000_tx_csum(struct e1000_adapter *adapter, | |
2849 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb) | |
2850 | { | |
2851 | struct e1000_context_desc *context_desc; | |
2852 | struct e1000_buffer *buffer_info; | |
2853 | unsigned int i; | |
2854 | u8 css; | |
2855 | u32 cmd_len = E1000_TXD_CMD_DEXT; | |
2856 | ||
2857 | if (skb->ip_summed != CHECKSUM_PARTIAL) | |
2858 | return false; | |
2859 | ||
2860 | switch (skb->protocol) { | |
2861 | case cpu_to_be16(ETH_P_IP): | |
2862 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | |
2863 | cmd_len |= E1000_TXD_CMD_TCP; | |
2864 | break; | |
2865 | case cpu_to_be16(ETH_P_IPV6): | |
2866 | /* XXX not handling all IPV6 headers */ | |
2867 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) | |
2868 | cmd_len |= E1000_TXD_CMD_TCP; | |
2869 | break; | |
2870 | default: | |
2871 | if (unlikely(net_ratelimit())) | |
2872 | DPRINTK(DRV, WARNING, | |
2873 | "checksum_partial proto=%x!\n", skb->protocol); | |
2874 | break; | |
2875 | } | |
2876 | ||
2877 | css = skb_transport_offset(skb); | |
2878 | ||
2879 | i = tx_ring->next_to_use; | |
2880 | buffer_info = &tx_ring->buffer_info[i]; | |
2881 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); | |
2882 | ||
2883 | context_desc->lower_setup.ip_config = 0; | |
2884 | context_desc->upper_setup.tcp_fields.tucss = css; | |
2885 | context_desc->upper_setup.tcp_fields.tucso = | |
2886 | css + skb->csum_offset; | |
2887 | context_desc->upper_setup.tcp_fields.tucse = 0; | |
2888 | context_desc->tcp_seg_setup.data = 0; | |
2889 | context_desc->cmd_and_length = cpu_to_le32(cmd_len); | |
2890 | ||
2891 | buffer_info->time_stamp = jiffies; | |
2892 | buffer_info->next_to_watch = i; | |
2893 | ||
2894 | if (unlikely(++i == tx_ring->count)) i = 0; | |
2895 | tx_ring->next_to_use = i; | |
2896 | ||
2897 | return true; | |
2898 | } | |
2899 | ||
2900 | #define E1000_MAX_TXD_PWR 12 | |
2901 | #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) | |
2902 | ||
2903 | static int e1000_tx_map(struct e1000_adapter *adapter, | |
2904 | struct e1000_tx_ring *tx_ring, | |
2905 | struct sk_buff *skb, unsigned int first, | |
2906 | unsigned int max_per_txd, unsigned int nr_frags, | |
2907 | unsigned int mss) | |
2908 | { | |
2909 | struct e1000_hw *hw = &adapter->hw; | |
2910 | struct e1000_buffer *buffer_info; | |
2911 | unsigned int len = skb->len; | |
2912 | unsigned int offset = 0, size, count = 0, i; | |
2913 | unsigned int f; | |
2914 | len -= skb->data_len; | |
2915 | ||
2916 | i = tx_ring->next_to_use; | |
2917 | ||
2918 | while (len) { | |
2919 | buffer_info = &tx_ring->buffer_info[i]; | |
2920 | size = min(len, max_per_txd); | |
2921 | /* Workaround for Controller erratum -- | |
2922 | * descriptor for non-tso packet in a linear SKB that follows a | |
2923 | * tso gets written back prematurely before the data is fully | |
2924 | * DMA'd to the controller */ | |
2925 | if (!skb->data_len && tx_ring->last_tx_tso && | |
2926 | !skb_is_gso(skb)) { | |
2927 | tx_ring->last_tx_tso = 0; | |
2928 | size -= 4; | |
2929 | } | |
2930 | ||
2931 | /* Workaround for premature desc write-backs | |
2932 | * in TSO mode. Append 4-byte sentinel desc */ | |
2933 | if (unlikely(mss && !nr_frags && size == len && size > 8)) | |
2934 | size -= 4; | |
2935 | /* work-around for errata 10 and it applies | |
2936 | * to all controllers in PCI-X mode | |
2937 | * The fix is to make sure that the first descriptor of a | |
2938 | * packet is smaller than 2048 - 16 - 16 (or 2016) bytes | |
2939 | */ | |
2940 | if (unlikely((hw->bus_type == e1000_bus_type_pcix) && | |
2941 | (size > 2015) && count == 0)) | |
2942 | size = 2015; | |
2943 | ||
2944 | /* Workaround for potential 82544 hang in PCI-X. Avoid | |
2945 | * terminating buffers within evenly-aligned dwords. */ | |
2946 | if (unlikely(adapter->pcix_82544 && | |
2947 | !((unsigned long)(skb->data + offset + size - 1) & 4) && | |
2948 | size > 4)) | |
2949 | size -= 4; | |
2950 | ||
2951 | buffer_info->length = size; | |
2952 | buffer_info->dma = | |
2953 | pci_map_single(adapter->pdev, | |
2954 | skb->data + offset, | |
2955 | size, | |
2956 | PCI_DMA_TODEVICE); | |
2957 | buffer_info->time_stamp = jiffies; | |
2958 | buffer_info->next_to_watch = i; | |
2959 | ||
2960 | len -= size; | |
2961 | offset += size; | |
2962 | count++; | |
2963 | if (unlikely(++i == tx_ring->count)) i = 0; | |
2964 | } | |
2965 | ||
2966 | for (f = 0; f < nr_frags; f++) { | |
2967 | struct skb_frag_struct *frag; | |
2968 | ||
2969 | frag = &skb_shinfo(skb)->frags[f]; | |
2970 | len = frag->size; | |
2971 | offset = frag->page_offset; | |
2972 | ||
2973 | while (len) { | |
2974 | buffer_info = &tx_ring->buffer_info[i]; | |
2975 | size = min(len, max_per_txd); | |
2976 | /* Workaround for premature desc write-backs | |
2977 | * in TSO mode. Append 4-byte sentinel desc */ | |
2978 | if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8)) | |
2979 | size -= 4; | |
2980 | /* Workaround for potential 82544 hang in PCI-X. | |
2981 | * Avoid terminating buffers within evenly-aligned | |
2982 | * dwords. */ | |
2983 | if (unlikely(adapter->pcix_82544 && | |
2984 | !((unsigned long)(frag->page+offset+size-1) & 4) && | |
2985 | size > 4)) | |
2986 | size -= 4; | |
2987 | ||
2988 | buffer_info->length = size; | |
2989 | buffer_info->dma = | |
2990 | pci_map_page(adapter->pdev, | |
2991 | frag->page, | |
2992 | offset, | |
2993 | size, | |
2994 | PCI_DMA_TODEVICE); | |
2995 | buffer_info->time_stamp = jiffies; | |
2996 | buffer_info->next_to_watch = i; | |
2997 | ||
2998 | len -= size; | |
2999 | offset += size; | |
3000 | count++; | |
3001 | if (unlikely(++i == tx_ring->count)) i = 0; | |
3002 | } | |
3003 | } | |
3004 | ||
3005 | i = (i == 0) ? tx_ring->count - 1 : i - 1; | |
3006 | tx_ring->buffer_info[i].skb = skb; | |
3007 | tx_ring->buffer_info[first].next_to_watch = i; | |
3008 | ||
3009 | return count; | |
3010 | } | |
3011 | ||
3012 | static void e1000_tx_queue(struct e1000_adapter *adapter, | |
3013 | struct e1000_tx_ring *tx_ring, int tx_flags, | |
3014 | int count) | |
3015 | { | |
3016 | struct e1000_hw *hw = &adapter->hw; | |
3017 | struct e1000_tx_desc *tx_desc = NULL; | |
3018 | struct e1000_buffer *buffer_info; | |
3019 | u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; | |
3020 | unsigned int i; | |
3021 | ||
3022 | if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { | |
3023 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | | |
3024 | E1000_TXD_CMD_TSE; | |
3025 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | |
3026 | ||
3027 | if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) | |
3028 | txd_upper |= E1000_TXD_POPTS_IXSM << 8; | |
3029 | } | |
3030 | ||
3031 | if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { | |
3032 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; | |
3033 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; | |
3034 | } | |
3035 | ||
3036 | if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { | |
3037 | txd_lower |= E1000_TXD_CMD_VLE; | |
3038 | txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); | |
3039 | } | |
3040 | ||
3041 | i = tx_ring->next_to_use; | |
3042 | ||
3043 | while (count--) { | |
3044 | buffer_info = &tx_ring->buffer_info[i]; | |
3045 | tx_desc = E1000_TX_DESC(*tx_ring, i); | |
3046 | tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | |
3047 | tx_desc->lower.data = | |
3048 | cpu_to_le32(txd_lower | buffer_info->length); | |
3049 | tx_desc->upper.data = cpu_to_le32(txd_upper); | |
3050 | if (unlikely(++i == tx_ring->count)) i = 0; | |
3051 | } | |
3052 | ||
3053 | tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); | |
3054 | ||
3055 | /* Force memory writes to complete before letting h/w | |
3056 | * know there are new descriptors to fetch. (Only | |
3057 | * applicable for weak-ordered memory model archs, | |
3058 | * such as IA-64). */ | |
3059 | wmb(); | |
3060 | ||
3061 | tx_ring->next_to_use = i; | |
3062 | writel(i, hw->hw_addr + tx_ring->tdt); | |
3063 | /* we need this if more than one processor can write to our tail | |
3064 | * at a time, it syncronizes IO on IA64/Altix systems */ | |
3065 | mmiowb(); | |
3066 | } | |
3067 | ||
3068 | /** | |
3069 | * 82547 workaround to avoid controller hang in half-duplex environment. | |
3070 | * The workaround is to avoid queuing a large packet that would span | |
3071 | * the internal Tx FIFO ring boundary by notifying the stack to resend | |
3072 | * the packet at a later time. This gives the Tx FIFO an opportunity to | |
3073 | * flush all packets. When that occurs, we reset the Tx FIFO pointers | |
3074 | * to the beginning of the Tx FIFO. | |
3075 | **/ | |
3076 | ||
3077 | #define E1000_FIFO_HDR 0x10 | |
3078 | #define E1000_82547_PAD_LEN 0x3E0 | |
3079 | ||
3080 | static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, | |
3081 | struct sk_buff *skb) | |
3082 | { | |
3083 | u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; | |
3084 | u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; | |
3085 | ||
3086 | skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); | |
3087 | ||
3088 | if (adapter->link_duplex != HALF_DUPLEX) | |
3089 | goto no_fifo_stall_required; | |
3090 | ||
3091 | if (atomic_read(&adapter->tx_fifo_stall)) | |
3092 | return 1; | |
3093 | ||
3094 | if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { | |
3095 | atomic_set(&adapter->tx_fifo_stall, 1); | |
3096 | return 1; | |
3097 | } | |
3098 | ||
3099 | no_fifo_stall_required: | |
3100 | adapter->tx_fifo_head += skb_fifo_len; | |
3101 | if (adapter->tx_fifo_head >= adapter->tx_fifo_size) | |
3102 | adapter->tx_fifo_head -= adapter->tx_fifo_size; | |
3103 | return 0; | |
3104 | } | |
3105 | ||
3106 | #define MINIMUM_DHCP_PACKET_SIZE 282 | |
3107 | static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, | |
3108 | struct sk_buff *skb) | |
3109 | { | |
3110 | struct e1000_hw *hw = &adapter->hw; | |
3111 | u16 length, offset; | |
3112 | if (vlan_tx_tag_present(skb)) { | |
3113 | if (!((vlan_tx_tag_get(skb) == hw->mng_cookie.vlan_id) && | |
3114 | ( hw->mng_cookie.status & | |
3115 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) | |
3116 | return 0; | |
3117 | } | |
3118 | if (skb->len > MINIMUM_DHCP_PACKET_SIZE) { | |
3119 | struct ethhdr *eth = (struct ethhdr *)skb->data; | |
3120 | if ((htons(ETH_P_IP) == eth->h_proto)) { | |
3121 | const struct iphdr *ip = | |
3122 | (struct iphdr *)((u8 *)skb->data+14); | |
3123 | if (IPPROTO_UDP == ip->protocol) { | |
3124 | struct udphdr *udp = | |
3125 | (struct udphdr *)((u8 *)ip + | |
3126 | (ip->ihl << 2)); | |
3127 | if (ntohs(udp->dest) == 67) { | |
3128 | offset = (u8 *)udp + 8 - skb->data; | |
3129 | length = skb->len - offset; | |
3130 | ||
3131 | return e1000_mng_write_dhcp_info(hw, | |
3132 | (u8 *)udp + 8, | |
3133 | length); | |
3134 | } | |
3135 | } | |
3136 | } | |
3137 | } | |
3138 | return 0; | |
3139 | } | |
3140 | ||
3141 | static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) | |
3142 | { | |
3143 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3144 | struct e1000_tx_ring *tx_ring = adapter->tx_ring; | |
3145 | ||
3146 | netif_stop_queue(netdev); | |
3147 | /* Herbert's original patch had: | |
3148 | * smp_mb__after_netif_stop_queue(); | |
3149 | * but since that doesn't exist yet, just open code it. */ | |
3150 | smp_mb(); | |
3151 | ||
3152 | /* We need to check again in a case another CPU has just | |
3153 | * made room available. */ | |
3154 | if (likely(E1000_DESC_UNUSED(tx_ring) < size)) | |
3155 | return -EBUSY; | |
3156 | ||
3157 | /* A reprieve! */ | |
3158 | netif_start_queue(netdev); | |
3159 | ++adapter->restart_queue; | |
3160 | return 0; | |
3161 | } | |
3162 | ||
3163 | static int e1000_maybe_stop_tx(struct net_device *netdev, | |
3164 | struct e1000_tx_ring *tx_ring, int size) | |
3165 | { | |
3166 | if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) | |
3167 | return 0; | |
3168 | return __e1000_maybe_stop_tx(netdev, size); | |
3169 | } | |
3170 | ||
3171 | #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) | |
3172 | static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | |
3173 | { | |
3174 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3175 | struct e1000_hw *hw = &adapter->hw; | |
3176 | struct e1000_tx_ring *tx_ring; | |
3177 | unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; | |
3178 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; | |
3179 | unsigned int tx_flags = 0; | |
3180 | unsigned int len = skb->len - skb->data_len; | |
3181 | unsigned int nr_frags; | |
3182 | unsigned int mss; | |
3183 | int count = 0; | |
3184 | int tso; | |
3185 | unsigned int f; | |
3186 | ||
3187 | /* This goes back to the question of how to logically map a tx queue | |
3188 | * to a flow. Right now, performance is impacted slightly negatively | |
3189 | * if using multiple tx queues. If the stack breaks away from a | |
3190 | * single qdisc implementation, we can look at this again. */ | |
3191 | tx_ring = adapter->tx_ring; | |
3192 | ||
3193 | if (unlikely(skb->len <= 0)) { | |
3194 | dev_kfree_skb_any(skb); | |
3195 | return NETDEV_TX_OK; | |
3196 | } | |
3197 | ||
3198 | /* 82571 and newer doesn't need the workaround that limited descriptor | |
3199 | * length to 4kB */ | |
3200 | if (hw->mac_type >= e1000_82571) | |
3201 | max_per_txd = 8192; | |
3202 | ||
3203 | mss = skb_shinfo(skb)->gso_size; | |
3204 | /* The controller does a simple calculation to | |
3205 | * make sure there is enough room in the FIFO before | |
3206 | * initiating the DMA for each buffer. The calc is: | |
3207 | * 4 = ceil(buffer len/mss). To make sure we don't | |
3208 | * overrun the FIFO, adjust the max buffer len if mss | |
3209 | * drops. */ | |
3210 | if (mss) { | |
3211 | u8 hdr_len; | |
3212 | max_per_txd = min(mss << 2, max_per_txd); | |
3213 | max_txd_pwr = fls(max_per_txd) - 1; | |
3214 | ||
3215 | /* TSO Workaround for 82571/2/3 Controllers -- if skb->data | |
3216 | * points to just header, pull a few bytes of payload from | |
3217 | * frags into skb->data */ | |
3218 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
3219 | if (skb->data_len && hdr_len == len) { | |
3220 | switch (hw->mac_type) { | |
3221 | unsigned int pull_size; | |
3222 | case e1000_82544: | |
3223 | /* Make sure we have room to chop off 4 bytes, | |
3224 | * and that the end alignment will work out to | |
3225 | * this hardware's requirements | |
3226 | * NOTE: this is a TSO only workaround | |
3227 | * if end byte alignment not correct move us | |
3228 | * into the next dword */ | |
3229 | if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) | |
3230 | break; | |
3231 | /* fall through */ | |
3232 | case e1000_82571: | |
3233 | case e1000_82572: | |
3234 | case e1000_82573: | |
3235 | case e1000_ich8lan: | |
3236 | pull_size = min((unsigned int)4, skb->data_len); | |
3237 | if (!__pskb_pull_tail(skb, pull_size)) { | |
3238 | DPRINTK(DRV, ERR, | |
3239 | "__pskb_pull_tail failed.\n"); | |
3240 | dev_kfree_skb_any(skb); | |
3241 | return NETDEV_TX_OK; | |
3242 | } | |
3243 | len = skb->len - skb->data_len; | |
3244 | break; | |
3245 | default: | |
3246 | /* do nothing */ | |
3247 | break; | |
3248 | } | |
3249 | } | |
3250 | } | |
3251 | ||
3252 | /* reserve a descriptor for the offload context */ | |
3253 | if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) | |
3254 | count++; | |
3255 | count++; | |
3256 | ||
3257 | /* Controller Erratum workaround */ | |
3258 | if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) | |
3259 | count++; | |
3260 | ||
3261 | count += TXD_USE_COUNT(len, max_txd_pwr); | |
3262 | ||
3263 | if (adapter->pcix_82544) | |
3264 | count++; | |
3265 | ||
3266 | /* work-around for errata 10 and it applies to all controllers | |
3267 | * in PCI-X mode, so add one more descriptor to the count | |
3268 | */ | |
3269 | if (unlikely((hw->bus_type == e1000_bus_type_pcix) && | |
3270 | (len > 2015))) | |
3271 | count++; | |
3272 | ||
3273 | nr_frags = skb_shinfo(skb)->nr_frags; | |
3274 | for (f = 0; f < nr_frags; f++) | |
3275 | count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, | |
3276 | max_txd_pwr); | |
3277 | if (adapter->pcix_82544) | |
3278 | count += nr_frags; | |
3279 | ||
3280 | ||
3281 | if (hw->tx_pkt_filtering && | |
3282 | (hw->mac_type == e1000_82573)) | |
3283 | e1000_transfer_dhcp_info(adapter, skb); | |
3284 | ||
3285 | /* need: count + 2 desc gap to keep tail from touching | |
3286 | * head, otherwise try next time */ | |
3287 | if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) | |
3288 | return NETDEV_TX_BUSY; | |
3289 | ||
3290 | if (unlikely(hw->mac_type == e1000_82547)) { | |
3291 | if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) { | |
3292 | netif_stop_queue(netdev); | |
3293 | mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1); | |
3294 | return NETDEV_TX_BUSY; | |
3295 | } | |
3296 | } | |
3297 | ||
3298 | if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) { | |
3299 | tx_flags |= E1000_TX_FLAGS_VLAN; | |
3300 | tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); | |
3301 | } | |
3302 | ||
3303 | first = tx_ring->next_to_use; | |
3304 | ||
3305 | tso = e1000_tso(adapter, tx_ring, skb); | |
3306 | if (tso < 0) { | |
3307 | dev_kfree_skb_any(skb); | |
3308 | return NETDEV_TX_OK; | |
3309 | } | |
3310 | ||
3311 | if (likely(tso)) { | |
3312 | tx_ring->last_tx_tso = 1; | |
3313 | tx_flags |= E1000_TX_FLAGS_TSO; | |
3314 | } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) | |
3315 | tx_flags |= E1000_TX_FLAGS_CSUM; | |
3316 | ||
3317 | /* Old method was to assume IPv4 packet by default if TSO was enabled. | |
3318 | * 82571 hardware supports TSO capabilities for IPv6 as well... | |
3319 | * no longer assume, we must. */ | |
3320 | if (likely(skb->protocol == htons(ETH_P_IP))) | |
3321 | tx_flags |= E1000_TX_FLAGS_IPV4; | |
3322 | ||
3323 | e1000_tx_queue(adapter, tx_ring, tx_flags, | |
3324 | e1000_tx_map(adapter, tx_ring, skb, first, | |
3325 | max_per_txd, nr_frags, mss)); | |
3326 | ||
3327 | netdev->trans_start = jiffies; | |
3328 | ||
3329 | /* Make sure there is space in the ring for the next send. */ | |
3330 | e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); | |
3331 | ||
3332 | return NETDEV_TX_OK; | |
3333 | } | |
3334 | ||
3335 | /** | |
3336 | * e1000_tx_timeout - Respond to a Tx Hang | |
3337 | * @netdev: network interface device structure | |
3338 | **/ | |
3339 | ||
3340 | static void e1000_tx_timeout(struct net_device *netdev) | |
3341 | { | |
3342 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3343 | ||
3344 | /* Do the reset outside of interrupt context */ | |
3345 | adapter->tx_timeout_count++; | |
3346 | schedule_work(&adapter->reset_task); | |
3347 | } | |
3348 | ||
3349 | static void e1000_reset_task(struct work_struct *work) | |
3350 | { | |
3351 | struct e1000_adapter *adapter = | |
3352 | container_of(work, struct e1000_adapter, reset_task); | |
3353 | ||
3354 | e1000_reinit_locked(adapter); | |
3355 | } | |
3356 | ||
3357 | /** | |
3358 | * e1000_get_stats - Get System Network Statistics | |
3359 | * @netdev: network interface device structure | |
3360 | * | |
3361 | * Returns the address of the device statistics structure. | |
3362 | * The statistics are actually updated from the timer callback. | |
3363 | **/ | |
3364 | ||
3365 | static struct net_device_stats *e1000_get_stats(struct net_device *netdev) | |
3366 | { | |
3367 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3368 | ||
3369 | /* only return the current stats */ | |
3370 | return &adapter->net_stats; | |
3371 | } | |
3372 | ||
3373 | /** | |
3374 | * e1000_change_mtu - Change the Maximum Transfer Unit | |
3375 | * @netdev: network interface device structure | |
3376 | * @new_mtu: new value for maximum frame size | |
3377 | * | |
3378 | * Returns 0 on success, negative on failure | |
3379 | **/ | |
3380 | ||
3381 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) | |
3382 | { | |
3383 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3384 | struct e1000_hw *hw = &adapter->hw; | |
3385 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | |
3386 | u16 eeprom_data = 0; | |
3387 | ||
3388 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | |
3389 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | |
3390 | DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); | |
3391 | return -EINVAL; | |
3392 | } | |
3393 | ||
3394 | /* Adapter-specific max frame size limits. */ | |
3395 | switch (hw->mac_type) { | |
3396 | case e1000_undefined ... e1000_82542_rev2_1: | |
3397 | case e1000_ich8lan: | |
3398 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | |
3399 | DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); | |
3400 | return -EINVAL; | |
3401 | } | |
3402 | break; | |
3403 | case e1000_82573: | |
3404 | /* Jumbo Frames not supported if: | |
3405 | * - this is not an 82573L device | |
3406 | * - ASPM is enabled in any way (0x1A bits 3:2) */ | |
3407 | e1000_read_eeprom(hw, EEPROM_INIT_3GIO_3, 1, | |
3408 | &eeprom_data); | |
3409 | if ((hw->device_id != E1000_DEV_ID_82573L) || | |
3410 | (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) { | |
3411 | if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { | |
3412 | DPRINTK(PROBE, ERR, | |
3413 | "Jumbo Frames not supported.\n"); | |
3414 | return -EINVAL; | |
3415 | } | |
3416 | break; | |
3417 | } | |
3418 | /* ERT will be enabled later to enable wire speed receives */ | |
3419 | ||
3420 | /* fall through to get support */ | |
3421 | case e1000_82571: | |
3422 | case e1000_82572: | |
3423 | case e1000_80003es2lan: | |
3424 | #define MAX_STD_JUMBO_FRAME_SIZE 9234 | |
3425 | if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { | |
3426 | DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n"); | |
3427 | return -EINVAL; | |
3428 | } | |
3429 | break; | |
3430 | default: | |
3431 | /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ | |
3432 | break; | |
3433 | } | |
3434 | ||
3435 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN | |
3436 | * means we reserve 2 more, this pushes us to allocate from the next | |
3437 | * larger slab size | |
3438 | * i.e. RXBUFFER_2048 --> size-4096 slab */ | |
3439 | ||
3440 | if (max_frame <= E1000_RXBUFFER_256) | |
3441 | adapter->rx_buffer_len = E1000_RXBUFFER_256; | |
3442 | else if (max_frame <= E1000_RXBUFFER_512) | |
3443 | adapter->rx_buffer_len = E1000_RXBUFFER_512; | |
3444 | else if (max_frame <= E1000_RXBUFFER_1024) | |
3445 | adapter->rx_buffer_len = E1000_RXBUFFER_1024; | |
3446 | else if (max_frame <= E1000_RXBUFFER_2048) | |
3447 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; | |
3448 | else if (max_frame <= E1000_RXBUFFER_4096) | |
3449 | adapter->rx_buffer_len = E1000_RXBUFFER_4096; | |
3450 | else if (max_frame <= E1000_RXBUFFER_8192) | |
3451 | adapter->rx_buffer_len = E1000_RXBUFFER_8192; | |
3452 | else if (max_frame <= E1000_RXBUFFER_16384) | |
3453 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; | |
3454 | ||
3455 | /* adjust allocation if LPE protects us, and we aren't using SBP */ | |
3456 | if (!hw->tbi_compatibility_on && | |
3457 | ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) || | |
3458 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) | |
3459 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; | |
3460 | ||
3461 | netdev->mtu = new_mtu; | |
3462 | hw->max_frame_size = max_frame; | |
3463 | ||
3464 | if (netif_running(netdev)) | |
3465 | e1000_reinit_locked(adapter); | |
3466 | ||
3467 | return 0; | |
3468 | } | |
3469 | ||
3470 | /** | |
3471 | * e1000_update_stats - Update the board statistics counters | |
3472 | * @adapter: board private structure | |
3473 | **/ | |
3474 | ||
3475 | void e1000_update_stats(struct e1000_adapter *adapter) | |
3476 | { | |
3477 | struct e1000_hw *hw = &adapter->hw; | |
3478 | struct pci_dev *pdev = adapter->pdev; | |
3479 | unsigned long flags; | |
3480 | u16 phy_tmp; | |
3481 | ||
3482 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF | |
3483 | ||
3484 | /* | |
3485 | * Prevent stats update while adapter is being reset, or if the pci | |
3486 | * connection is down. | |
3487 | */ | |
3488 | if (adapter->link_speed == 0) | |
3489 | return; | |
3490 | if (pci_channel_offline(pdev)) | |
3491 | return; | |
3492 | ||
3493 | spin_lock_irqsave(&adapter->stats_lock, flags); | |
3494 | ||
3495 | /* these counters are modified from e1000_tbi_adjust_stats, | |
3496 | * called from the interrupt context, so they must only | |
3497 | * be written while holding adapter->stats_lock | |
3498 | */ | |
3499 | ||
3500 | adapter->stats.crcerrs += er32(CRCERRS); | |
3501 | adapter->stats.gprc += er32(GPRC); | |
3502 | adapter->stats.gorcl += er32(GORCL); | |
3503 | adapter->stats.gorch += er32(GORCH); | |
3504 | adapter->stats.bprc += er32(BPRC); | |
3505 | adapter->stats.mprc += er32(MPRC); | |
3506 | adapter->stats.roc += er32(ROC); | |
3507 | ||
3508 | if (hw->mac_type != e1000_ich8lan) { | |
3509 | adapter->stats.prc64 += er32(PRC64); | |
3510 | adapter->stats.prc127 += er32(PRC127); | |
3511 | adapter->stats.prc255 += er32(PRC255); | |
3512 | adapter->stats.prc511 += er32(PRC511); | |
3513 | adapter->stats.prc1023 += er32(PRC1023); | |
3514 | adapter->stats.prc1522 += er32(PRC1522); | |
3515 | } | |
3516 | ||
3517 | adapter->stats.symerrs += er32(SYMERRS); | |
3518 | adapter->stats.mpc += er32(MPC); | |
3519 | adapter->stats.scc += er32(SCC); | |
3520 | adapter->stats.ecol += er32(ECOL); | |
3521 | adapter->stats.mcc += er32(MCC); | |
3522 | adapter->stats.latecol += er32(LATECOL); | |
3523 | adapter->stats.dc += er32(DC); | |
3524 | adapter->stats.sec += er32(SEC); | |
3525 | adapter->stats.rlec += er32(RLEC); | |
3526 | adapter->stats.xonrxc += er32(XONRXC); | |
3527 | adapter->stats.xontxc += er32(XONTXC); | |
3528 | adapter->stats.xoffrxc += er32(XOFFRXC); | |
3529 | adapter->stats.xofftxc += er32(XOFFTXC); | |
3530 | adapter->stats.fcruc += er32(FCRUC); | |
3531 | adapter->stats.gptc += er32(GPTC); | |
3532 | adapter->stats.gotcl += er32(GOTCL); | |
3533 | adapter->stats.gotch += er32(GOTCH); | |
3534 | adapter->stats.rnbc += er32(RNBC); | |
3535 | adapter->stats.ruc += er32(RUC); | |
3536 | adapter->stats.rfc += er32(RFC); | |
3537 | adapter->stats.rjc += er32(RJC); | |
3538 | adapter->stats.torl += er32(TORL); | |
3539 | adapter->stats.torh += er32(TORH); | |
3540 | adapter->stats.totl += er32(TOTL); | |
3541 | adapter->stats.toth += er32(TOTH); | |
3542 | adapter->stats.tpr += er32(TPR); | |
3543 | ||
3544 | if (hw->mac_type != e1000_ich8lan) { | |
3545 | adapter->stats.ptc64 += er32(PTC64); | |
3546 | adapter->stats.ptc127 += er32(PTC127); | |
3547 | adapter->stats.ptc255 += er32(PTC255); | |
3548 | adapter->stats.ptc511 += er32(PTC511); | |
3549 | adapter->stats.ptc1023 += er32(PTC1023); | |
3550 | adapter->stats.ptc1522 += er32(PTC1522); | |
3551 | } | |
3552 | ||
3553 | adapter->stats.mptc += er32(MPTC); | |
3554 | adapter->stats.bptc += er32(BPTC); | |
3555 | ||
3556 | /* used for adaptive IFS */ | |
3557 | ||
3558 | hw->tx_packet_delta = er32(TPT); | |
3559 | adapter->stats.tpt += hw->tx_packet_delta; | |
3560 | hw->collision_delta = er32(COLC); | |
3561 | adapter->stats.colc += hw->collision_delta; | |
3562 | ||
3563 | if (hw->mac_type >= e1000_82543) { | |
3564 | adapter->stats.algnerrc += er32(ALGNERRC); | |
3565 | adapter->stats.rxerrc += er32(RXERRC); | |
3566 | adapter->stats.tncrs += er32(TNCRS); | |
3567 | adapter->stats.cexterr += er32(CEXTERR); | |
3568 | adapter->stats.tsctc += er32(TSCTC); | |
3569 | adapter->stats.tsctfc += er32(TSCTFC); | |
3570 | } | |
3571 | if (hw->mac_type > e1000_82547_rev_2) { | |
3572 | adapter->stats.iac += er32(IAC); | |
3573 | adapter->stats.icrxoc += er32(ICRXOC); | |
3574 | ||
3575 | if (hw->mac_type != e1000_ich8lan) { | |
3576 | adapter->stats.icrxptc += er32(ICRXPTC); | |
3577 | adapter->stats.icrxatc += er32(ICRXATC); | |
3578 | adapter->stats.ictxptc += er32(ICTXPTC); | |
3579 | adapter->stats.ictxatc += er32(ICTXATC); | |
3580 | adapter->stats.ictxqec += er32(ICTXQEC); | |
3581 | adapter->stats.ictxqmtc += er32(ICTXQMTC); | |
3582 | adapter->stats.icrxdmtc += er32(ICRXDMTC); | |
3583 | } | |
3584 | } | |
3585 | ||
3586 | /* Fill out the OS statistics structure */ | |
3587 | adapter->net_stats.multicast = adapter->stats.mprc; | |
3588 | adapter->net_stats.collisions = adapter->stats.colc; | |
3589 | ||
3590 | /* Rx Errors */ | |
3591 | ||
3592 | /* RLEC on some newer hardware can be incorrect so build | |
3593 | * our own version based on RUC and ROC */ | |
3594 | adapter->net_stats.rx_errors = adapter->stats.rxerrc + | |
3595 | adapter->stats.crcerrs + adapter->stats.algnerrc + | |
3596 | adapter->stats.ruc + adapter->stats.roc + | |
3597 | adapter->stats.cexterr; | |
3598 | adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; | |
3599 | adapter->net_stats.rx_length_errors = adapter->stats.rlerrc; | |
3600 | adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; | |
3601 | adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; | |
3602 | adapter->net_stats.rx_missed_errors = adapter->stats.mpc; | |
3603 | ||
3604 | /* Tx Errors */ | |
3605 | adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; | |
3606 | adapter->net_stats.tx_errors = adapter->stats.txerrc; | |
3607 | adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; | |
3608 | adapter->net_stats.tx_window_errors = adapter->stats.latecol; | |
3609 | adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; | |
3610 | if (hw->bad_tx_carr_stats_fd && | |
3611 | adapter->link_duplex == FULL_DUPLEX) { | |
3612 | adapter->net_stats.tx_carrier_errors = 0; | |
3613 | adapter->stats.tncrs = 0; | |
3614 | } | |
3615 | ||
3616 | /* Tx Dropped needs to be maintained elsewhere */ | |
3617 | ||
3618 | /* Phy Stats */ | |
3619 | if (hw->media_type == e1000_media_type_copper) { | |
3620 | if ((adapter->link_speed == SPEED_1000) && | |
3621 | (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { | |
3622 | phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; | |
3623 | adapter->phy_stats.idle_errors += phy_tmp; | |
3624 | } | |
3625 | ||
3626 | if ((hw->mac_type <= e1000_82546) && | |
3627 | (hw->phy_type == e1000_phy_m88) && | |
3628 | !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) | |
3629 | adapter->phy_stats.receive_errors += phy_tmp; | |
3630 | } | |
3631 | ||
3632 | /* Management Stats */ | |
3633 | if (hw->has_smbus) { | |
3634 | adapter->stats.mgptc += er32(MGTPTC); | |
3635 | adapter->stats.mgprc += er32(MGTPRC); | |
3636 | adapter->stats.mgpdc += er32(MGTPDC); | |
3637 | } | |
3638 | ||
3639 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | |
3640 | } | |
3641 | ||
3642 | /** | |
3643 | * e1000_intr_msi - Interrupt Handler | |
3644 | * @irq: interrupt number | |
3645 | * @data: pointer to a network interface device structure | |
3646 | **/ | |
3647 | ||
3648 | static irqreturn_t e1000_intr_msi(int irq, void *data) | |
3649 | { | |
3650 | struct net_device *netdev = data; | |
3651 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3652 | struct e1000_hw *hw = &adapter->hw; | |
3653 | u32 icr = er32(ICR); | |
3654 | ||
3655 | /* in NAPI mode read ICR disables interrupts using IAM */ | |
3656 | ||
3657 | if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { | |
3658 | hw->get_link_status = 1; | |
3659 | /* 80003ES2LAN workaround-- For packet buffer work-around on | |
3660 | * link down event; disable receives here in the ISR and reset | |
3661 | * adapter in watchdog */ | |
3662 | if (netif_carrier_ok(netdev) && | |
3663 | (hw->mac_type == e1000_80003es2lan)) { | |
3664 | /* disable receives */ | |
3665 | u32 rctl = er32(RCTL); | |
3666 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | |
3667 | } | |
3668 | /* guard against interrupt when we're going down */ | |
3669 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | |
3670 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | |
3671 | } | |
3672 | ||
3673 | if (likely(napi_schedule_prep(&adapter->napi))) { | |
3674 | adapter->total_tx_bytes = 0; | |
3675 | adapter->total_tx_packets = 0; | |
3676 | adapter->total_rx_bytes = 0; | |
3677 | adapter->total_rx_packets = 0; | |
3678 | __napi_schedule(&adapter->napi); | |
3679 | } else | |
3680 | e1000_irq_enable(adapter); | |
3681 | ||
3682 | return IRQ_HANDLED; | |
3683 | } | |
3684 | ||
3685 | /** | |
3686 | * e1000_intr - Interrupt Handler | |
3687 | * @irq: interrupt number | |
3688 | * @data: pointer to a network interface device structure | |
3689 | **/ | |
3690 | ||
3691 | static irqreturn_t e1000_intr(int irq, void *data) | |
3692 | { | |
3693 | struct net_device *netdev = data; | |
3694 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
3695 | struct e1000_hw *hw = &adapter->hw; | |
3696 | u32 rctl, icr = er32(ICR); | |
3697 | ||
3698 | if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags))) | |
3699 | return IRQ_NONE; /* Not our interrupt */ | |
3700 | ||
3701 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is | |
3702 | * not set, then the adapter didn't send an interrupt */ | |
3703 | if (unlikely(hw->mac_type >= e1000_82571 && | |
3704 | !(icr & E1000_ICR_INT_ASSERTED))) | |
3705 | return IRQ_NONE; | |
3706 | ||
3707 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No | |
3708 | * need for the IMC write */ | |
3709 | ||
3710 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { | |
3711 | hw->get_link_status = 1; | |
3712 | /* 80003ES2LAN workaround-- | |
3713 | * For packet buffer work-around on link down event; | |
3714 | * disable receives here in the ISR and | |
3715 | * reset adapter in watchdog | |
3716 | */ | |
3717 | if (netif_carrier_ok(netdev) && | |
3718 | (hw->mac_type == e1000_80003es2lan)) { | |
3719 | /* disable receives */ | |
3720 | rctl = er32(RCTL); | |
3721 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | |
3722 | } | |
3723 | /* guard against interrupt when we're going down */ | |
3724 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | |
3725 | mod_timer(&adapter->watchdog_timer, jiffies + 1); | |
3726 | } | |
3727 | ||
3728 | if (unlikely(hw->mac_type < e1000_82571)) { | |
3729 | /* disable interrupts, without the synchronize_irq bit */ | |
3730 | ew32(IMC, ~0); | |
3731 | E1000_WRITE_FLUSH(); | |
3732 | } | |
3733 | if (likely(napi_schedule_prep(&adapter->napi))) { | |
3734 | adapter->total_tx_bytes = 0; | |
3735 | adapter->total_tx_packets = 0; | |
3736 | adapter->total_rx_bytes = 0; | |
3737 | adapter->total_rx_packets = 0; | |
3738 | __napi_schedule(&adapter->napi); | |
3739 | } else | |
3740 | /* this really should not happen! if it does it is basically a | |
3741 | * bug, but not a hard error, so enable ints and continue */ | |
3742 | e1000_irq_enable(adapter); | |
3743 | ||
3744 | return IRQ_HANDLED; | |
3745 | } | |
3746 | ||
3747 | /** | |
3748 | * e1000_clean - NAPI Rx polling callback | |
3749 | * @adapter: board private structure | |
3750 | **/ | |
3751 | static int e1000_clean(struct napi_struct *napi, int budget) | |
3752 | { | |
3753 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); | |
3754 | struct net_device *poll_dev = adapter->netdev; | |
3755 | int tx_cleaned = 0, work_done = 0; | |
3756 | ||
3757 | adapter = netdev_priv(poll_dev); | |
3758 | ||
3759 | tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); | |
3760 | ||
3761 | adapter->clean_rx(adapter, &adapter->rx_ring[0], | |
3762 | &work_done, budget); | |
3763 | ||
3764 | if (tx_cleaned) | |
3765 | work_done = budget; | |
3766 | ||
3767 | /* If budget not fully consumed, exit the polling mode */ | |
3768 | if (work_done < budget) { | |
3769 | if (likely(adapter->itr_setting & 3)) | |
3770 | e1000_set_itr(adapter); | |
3771 | napi_complete(napi); | |
3772 | e1000_irq_enable(adapter); | |
3773 | } | |
3774 | ||
3775 | return work_done; | |
3776 | } | |
3777 | ||
3778 | /** | |
3779 | * e1000_clean_tx_irq - Reclaim resources after transmit completes | |
3780 | * @adapter: board private structure | |
3781 | **/ | |
3782 | static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, | |
3783 | struct e1000_tx_ring *tx_ring) | |
3784 | { | |
3785 | struct e1000_hw *hw = &adapter->hw; | |
3786 | struct net_device *netdev = adapter->netdev; | |
3787 | struct e1000_tx_desc *tx_desc, *eop_desc; | |
3788 | struct e1000_buffer *buffer_info; | |
3789 | unsigned int i, eop; | |
3790 | unsigned int count = 0; | |
3791 | bool cleaned = false; | |
3792 | unsigned int total_tx_bytes=0, total_tx_packets=0; | |
3793 | ||
3794 | i = tx_ring->next_to_clean; | |
3795 | eop = tx_ring->buffer_info[i].next_to_watch; | |
3796 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | |
3797 | ||
3798 | while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { | |
3799 | for (cleaned = false; !cleaned; ) { | |
3800 | tx_desc = E1000_TX_DESC(*tx_ring, i); | |
3801 | buffer_info = &tx_ring->buffer_info[i]; | |
3802 | cleaned = (i == eop); | |
3803 | ||
3804 | if (cleaned) { | |
3805 | struct sk_buff *skb = buffer_info->skb; | |
3806 | unsigned int segs, bytecount; | |
3807 | segs = skb_shinfo(skb)->gso_segs ?: 1; | |
3808 | /* multiply data chunks by size of headers */ | |
3809 | bytecount = ((segs - 1) * skb_headlen(skb)) + | |
3810 | skb->len; | |
3811 | total_tx_packets += segs; | |
3812 | total_tx_bytes += bytecount; | |
3813 | } | |
3814 | e1000_unmap_and_free_tx_resource(adapter, buffer_info); | |
3815 | tx_desc->upper.data = 0; | |
3816 | ||
3817 | if (unlikely(++i == tx_ring->count)) i = 0; | |
3818 | } | |
3819 | ||
3820 | eop = tx_ring->buffer_info[i].next_to_watch; | |
3821 | eop_desc = E1000_TX_DESC(*tx_ring, eop); | |
3822 | #define E1000_TX_WEIGHT 64 | |
3823 | /* weight of a sort for tx, to avoid endless transmit cleanup */ | |
3824 | if (count++ == E1000_TX_WEIGHT) | |
3825 | break; | |
3826 | } | |
3827 | ||
3828 | tx_ring->next_to_clean = i; | |
3829 | ||
3830 | #define TX_WAKE_THRESHOLD 32 | |
3831 | if (unlikely(cleaned && netif_carrier_ok(netdev) && | |
3832 | E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { | |
3833 | /* Make sure that anybody stopping the queue after this | |
3834 | * sees the new next_to_clean. | |
3835 | */ | |
3836 | smp_mb(); | |
3837 | if (netif_queue_stopped(netdev)) { | |
3838 | netif_wake_queue(netdev); | |
3839 | ++adapter->restart_queue; | |
3840 | } | |
3841 | } | |
3842 | ||
3843 | if (adapter->detect_tx_hung) { | |
3844 | /* Detect a transmit hang in hardware, this serializes the | |
3845 | * check with the clearing of time_stamp and movement of i */ | |
3846 | adapter->detect_tx_hung = false; | |
3847 | if (tx_ring->buffer_info[eop].dma && | |
3848 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + | |
3849 | (adapter->tx_timeout_factor * HZ)) | |
3850 | && !(er32(STATUS) & E1000_STATUS_TXOFF)) { | |
3851 | ||
3852 | /* detected Tx unit hang */ | |
3853 | DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" | |
3854 | " Tx Queue <%lu>\n" | |
3855 | " TDH <%x>\n" | |
3856 | " TDT <%x>\n" | |
3857 | " next_to_use <%x>\n" | |
3858 | " next_to_clean <%x>\n" | |
3859 | "buffer_info[next_to_clean]\n" | |
3860 | " time_stamp <%lx>\n" | |
3861 | " next_to_watch <%x>\n" | |
3862 | " jiffies <%lx>\n" | |
3863 | " next_to_watch.status <%x>\n", | |
3864 | (unsigned long)((tx_ring - adapter->tx_ring) / | |
3865 | sizeof(struct e1000_tx_ring)), | |
3866 | readl(hw->hw_addr + tx_ring->tdh), | |
3867 | readl(hw->hw_addr + tx_ring->tdt), | |
3868 | tx_ring->next_to_use, | |
3869 | tx_ring->next_to_clean, | |
3870 | tx_ring->buffer_info[eop].time_stamp, | |
3871 | eop, | |
3872 | jiffies, | |
3873 | eop_desc->upper.fields.status); | |
3874 | netif_stop_queue(netdev); | |
3875 | } | |
3876 | } | |
3877 | adapter->total_tx_bytes += total_tx_bytes; | |
3878 | adapter->total_tx_packets += total_tx_packets; | |
3879 | adapter->net_stats.tx_bytes += total_tx_bytes; | |
3880 | adapter->net_stats.tx_packets += total_tx_packets; | |
3881 | return cleaned; | |
3882 | } | |
3883 | ||
3884 | /** | |
3885 | * e1000_rx_checksum - Receive Checksum Offload for 82543 | |
3886 | * @adapter: board private structure | |
3887 | * @status_err: receive descriptor status and error fields | |
3888 | * @csum: receive descriptor csum field | |
3889 | * @sk_buff: socket buffer with received data | |
3890 | **/ | |
3891 | ||
3892 | static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, | |
3893 | u32 csum, struct sk_buff *skb) | |
3894 | { | |
3895 | struct e1000_hw *hw = &adapter->hw; | |
3896 | u16 status = (u16)status_err; | |
3897 | u8 errors = (u8)(status_err >> 24); | |
3898 | skb->ip_summed = CHECKSUM_NONE; | |
3899 | ||
3900 | /* 82543 or newer only */ | |
3901 | if (unlikely(hw->mac_type < e1000_82543)) return; | |
3902 | /* Ignore Checksum bit is set */ | |
3903 | if (unlikely(status & E1000_RXD_STAT_IXSM)) return; | |
3904 | /* TCP/UDP checksum error bit is set */ | |
3905 | if (unlikely(errors & E1000_RXD_ERR_TCPE)) { | |
3906 | /* let the stack verify checksum errors */ | |
3907 | adapter->hw_csum_err++; | |
3908 | return; | |
3909 | } | |
3910 | /* TCP/UDP Checksum has not been calculated */ | |
3911 | if (hw->mac_type <= e1000_82547_rev_2) { | |
3912 | if (!(status & E1000_RXD_STAT_TCPCS)) | |
3913 | return; | |
3914 | } else { | |
3915 | if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) | |
3916 | return; | |
3917 | } | |
3918 | /* It must be a TCP or UDP packet with a valid checksum */ | |
3919 | if (likely(status & E1000_RXD_STAT_TCPCS)) { | |
3920 | /* TCP checksum is good */ | |
3921 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
3922 | } else if (hw->mac_type > e1000_82547_rev_2) { | |
3923 | /* IP fragment with UDP payload */ | |
3924 | /* Hardware complements the payload checksum, so we undo it | |
3925 | * and then put the value in host order for further stack use. | |
3926 | */ | |
3927 | __sum16 sum = (__force __sum16)htons(csum); | |
3928 | skb->csum = csum_unfold(~sum); | |
3929 | skb->ip_summed = CHECKSUM_COMPLETE; | |
3930 | } | |
3931 | adapter->hw_csum_good++; | |
3932 | } | |
3933 | ||
3934 | /** | |
3935 | * e1000_clean_rx_irq - Send received data up the network stack; legacy | |
3936 | * @adapter: board private structure | |
3937 | **/ | |
3938 | static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, | |
3939 | struct e1000_rx_ring *rx_ring, | |
3940 | int *work_done, int work_to_do) | |
3941 | { | |
3942 | struct e1000_hw *hw = &adapter->hw; | |
3943 | struct net_device *netdev = adapter->netdev; | |
3944 | struct pci_dev *pdev = adapter->pdev; | |
3945 | struct e1000_rx_desc *rx_desc, *next_rxd; | |
3946 | struct e1000_buffer *buffer_info, *next_buffer; | |
3947 | unsigned long flags; | |
3948 | u32 length; | |
3949 | u8 last_byte; | |
3950 | unsigned int i; | |
3951 | int cleaned_count = 0; | |
3952 | bool cleaned = false; | |
3953 | unsigned int total_rx_bytes=0, total_rx_packets=0; | |
3954 | ||
3955 | i = rx_ring->next_to_clean; | |
3956 | rx_desc = E1000_RX_DESC(*rx_ring, i); | |
3957 | buffer_info = &rx_ring->buffer_info[i]; | |
3958 | ||
3959 | while (rx_desc->status & E1000_RXD_STAT_DD) { | |
3960 | struct sk_buff *skb; | |
3961 | u8 status; | |
3962 | ||
3963 | if (*work_done >= work_to_do) | |
3964 | break; | |
3965 | (*work_done)++; | |
3966 | ||
3967 | status = rx_desc->status; | |
3968 | skb = buffer_info->skb; | |
3969 | buffer_info->skb = NULL; | |
3970 | ||
3971 | prefetch(skb->data - NET_IP_ALIGN); | |
3972 | ||
3973 | if (++i == rx_ring->count) i = 0; | |
3974 | next_rxd = E1000_RX_DESC(*rx_ring, i); | |
3975 | prefetch(next_rxd); | |
3976 | ||
3977 | next_buffer = &rx_ring->buffer_info[i]; | |
3978 | ||
3979 | cleaned = true; | |
3980 | cleaned_count++; | |
3981 | pci_unmap_single(pdev, | |
3982 | buffer_info->dma, | |
3983 | buffer_info->length, | |
3984 | PCI_DMA_FROMDEVICE); | |
3985 | ||
3986 | length = le16_to_cpu(rx_desc->length); | |
3987 | ||
3988 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) { | |
3989 | /* All receives must fit into a single buffer */ | |
3990 | E1000_DBG("%s: Receive packet consumed multiple" | |
3991 | " buffers\n", netdev->name); | |
3992 | /* recycle */ | |
3993 | buffer_info->skb = skb; | |
3994 | goto next_desc; | |
3995 | } | |
3996 | ||
3997 | if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { | |
3998 | last_byte = *(skb->data + length - 1); | |
3999 | if (TBI_ACCEPT(hw, status, rx_desc->errors, length, | |
4000 | last_byte)) { | |
4001 | spin_lock_irqsave(&adapter->stats_lock, flags); | |
4002 | e1000_tbi_adjust_stats(hw, &adapter->stats, | |
4003 | length, skb->data); | |
4004 | spin_unlock_irqrestore(&adapter->stats_lock, | |
4005 | flags); | |
4006 | length--; | |
4007 | } else { | |
4008 | /* recycle */ | |
4009 | buffer_info->skb = skb; | |
4010 | goto next_desc; | |
4011 | } | |
4012 | } | |
4013 | ||
4014 | /* adjust length to remove Ethernet CRC, this must be | |
4015 | * done after the TBI_ACCEPT workaround above */ | |
4016 | length -= 4; | |
4017 | ||
4018 | /* probably a little skewed due to removing CRC */ | |
4019 | total_rx_bytes += length; | |
4020 | total_rx_packets++; | |
4021 | ||
4022 | /* code added for copybreak, this should improve | |
4023 | * performance for small packets with large amounts | |
4024 | * of reassembly being done in the stack */ | |
4025 | if (length < copybreak) { | |
4026 | struct sk_buff *new_skb = | |
4027 | netdev_alloc_skb(netdev, length + NET_IP_ALIGN); | |
4028 | if (new_skb) { | |
4029 | skb_reserve(new_skb, NET_IP_ALIGN); | |
4030 | skb_copy_to_linear_data_offset(new_skb, | |
4031 | -NET_IP_ALIGN, | |
4032 | (skb->data - | |
4033 | NET_IP_ALIGN), | |
4034 | (length + | |
4035 | NET_IP_ALIGN)); | |
4036 | /* save the skb in buffer_info as good */ | |
4037 | buffer_info->skb = skb; | |
4038 | skb = new_skb; | |
4039 | } | |
4040 | /* else just continue with the old one */ | |
4041 | } | |
4042 | /* end copybreak code */ | |
4043 | skb_put(skb, length); | |
4044 | ||
4045 | /* Receive Checksum Offload */ | |
4046 | e1000_rx_checksum(adapter, | |
4047 | (u32)(status) | | |
4048 | ((u32)(rx_desc->errors) << 24), | |
4049 | le16_to_cpu(rx_desc->csum), skb); | |
4050 | ||
4051 | skb->protocol = eth_type_trans(skb, netdev); | |
4052 | ||
4053 | if (unlikely(adapter->vlgrp && | |
4054 | (status & E1000_RXD_STAT_VP))) { | |
4055 | vlan_hwaccel_receive_skb(skb, adapter->vlgrp, | |
4056 | le16_to_cpu(rx_desc->special)); | |
4057 | } else { | |
4058 | netif_receive_skb(skb); | |
4059 | } | |
4060 | ||
4061 | next_desc: | |
4062 | rx_desc->status = 0; | |
4063 | ||
4064 | /* return some buffers to hardware, one at a time is too slow */ | |
4065 | if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { | |
4066 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); | |
4067 | cleaned_count = 0; | |
4068 | } | |
4069 | ||
4070 | /* use prefetched values */ | |
4071 | rx_desc = next_rxd; | |
4072 | buffer_info = next_buffer; | |
4073 | } | |
4074 | rx_ring->next_to_clean = i; | |
4075 | ||
4076 | cleaned_count = E1000_DESC_UNUSED(rx_ring); | |
4077 | if (cleaned_count) | |
4078 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); | |
4079 | ||
4080 | adapter->total_rx_packets += total_rx_packets; | |
4081 | adapter->total_rx_bytes += total_rx_bytes; | |
4082 | adapter->net_stats.rx_bytes += total_rx_bytes; | |
4083 | adapter->net_stats.rx_packets += total_rx_packets; | |
4084 | return cleaned; | |
4085 | } | |
4086 | ||
4087 | /** | |
4088 | * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended | |
4089 | * @adapter: address of board private structure | |
4090 | **/ | |
4091 | ||
4092 | static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, | |
4093 | struct e1000_rx_ring *rx_ring, | |
4094 | int cleaned_count) | |
4095 | { | |
4096 | struct e1000_hw *hw = &adapter->hw; | |
4097 | struct net_device *netdev = adapter->netdev; | |
4098 | struct pci_dev *pdev = adapter->pdev; | |
4099 | struct e1000_rx_desc *rx_desc; | |
4100 | struct e1000_buffer *buffer_info; | |
4101 | struct sk_buff *skb; | |
4102 | unsigned int i; | |
4103 | unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; | |
4104 | ||
4105 | i = rx_ring->next_to_use; | |
4106 | buffer_info = &rx_ring->buffer_info[i]; | |
4107 | ||
4108 | while (cleaned_count--) { | |
4109 | skb = buffer_info->skb; | |
4110 | if (skb) { | |
4111 | skb_trim(skb, 0); | |
4112 | goto map_skb; | |
4113 | } | |
4114 | ||
4115 | skb = netdev_alloc_skb(netdev, bufsz); | |
4116 | if (unlikely(!skb)) { | |
4117 | /* Better luck next round */ | |
4118 | adapter->alloc_rx_buff_failed++; | |
4119 | break; | |
4120 | } | |
4121 | ||
4122 | /* Fix for errata 23, can't cross 64kB boundary */ | |
4123 | if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { | |
4124 | struct sk_buff *oldskb = skb; | |
4125 | DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " | |
4126 | "at %p\n", bufsz, skb->data); | |
4127 | /* Try again, without freeing the previous */ | |
4128 | skb = netdev_alloc_skb(netdev, bufsz); | |
4129 | /* Failed allocation, critical failure */ | |
4130 | if (!skb) { | |
4131 | dev_kfree_skb(oldskb); | |
4132 | break; | |
4133 | } | |
4134 | ||
4135 | if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { | |
4136 | /* give up */ | |
4137 | dev_kfree_skb(skb); | |
4138 | dev_kfree_skb(oldskb); | |
4139 | break; /* while !buffer_info->skb */ | |
4140 | } | |
4141 | ||
4142 | /* Use new allocation */ | |
4143 | dev_kfree_skb(oldskb); | |
4144 | } | |
4145 | /* Make buffer alignment 2 beyond a 16 byte boundary | |
4146 | * this will result in a 16 byte aligned IP header after | |
4147 | * the 14 byte MAC header is removed | |
4148 | */ | |
4149 | skb_reserve(skb, NET_IP_ALIGN); | |
4150 | ||
4151 | buffer_info->skb = skb; | |
4152 | buffer_info->length = adapter->rx_buffer_len; | |
4153 | map_skb: | |
4154 | buffer_info->dma = pci_map_single(pdev, | |
4155 | skb->data, | |
4156 | adapter->rx_buffer_len, | |
4157 | PCI_DMA_FROMDEVICE); | |
4158 | ||
4159 | /* Fix for errata 23, can't cross 64kB boundary */ | |
4160 | if (!e1000_check_64k_bound(adapter, | |
4161 | (void *)(unsigned long)buffer_info->dma, | |
4162 | adapter->rx_buffer_len)) { | |
4163 | DPRINTK(RX_ERR, ERR, | |
4164 | "dma align check failed: %u bytes at %p\n", | |
4165 | adapter->rx_buffer_len, | |
4166 | (void *)(unsigned long)buffer_info->dma); | |
4167 | dev_kfree_skb(skb); | |
4168 | buffer_info->skb = NULL; | |
4169 | ||
4170 | pci_unmap_single(pdev, buffer_info->dma, | |
4171 | adapter->rx_buffer_len, | |
4172 | PCI_DMA_FROMDEVICE); | |
4173 | ||
4174 | break; /* while !buffer_info->skb */ | |
4175 | } | |
4176 | rx_desc = E1000_RX_DESC(*rx_ring, i); | |
4177 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | |
4178 | ||
4179 | if (unlikely(++i == rx_ring->count)) | |
4180 | i = 0; | |
4181 | buffer_info = &rx_ring->buffer_info[i]; | |
4182 | } | |
4183 | ||
4184 | if (likely(rx_ring->next_to_use != i)) { | |
4185 | rx_ring->next_to_use = i; | |
4186 | if (unlikely(i-- == 0)) | |
4187 | i = (rx_ring->count - 1); | |
4188 | ||
4189 | /* Force memory writes to complete before letting h/w | |
4190 | * know there are new descriptors to fetch. (Only | |
4191 | * applicable for weak-ordered memory model archs, | |
4192 | * such as IA-64). */ | |
4193 | wmb(); | |
4194 | writel(i, hw->hw_addr + rx_ring->rdt); | |
4195 | } | |
4196 | } | |
4197 | ||
4198 | /** | |
4199 | * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. | |
4200 | * @adapter: | |
4201 | **/ | |
4202 | ||
4203 | static void e1000_smartspeed(struct e1000_adapter *adapter) | |
4204 | { | |
4205 | struct e1000_hw *hw = &adapter->hw; | |
4206 | u16 phy_status; | |
4207 | u16 phy_ctrl; | |
4208 | ||
4209 | if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || | |
4210 | !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) | |
4211 | return; | |
4212 | ||
4213 | if (adapter->smartspeed == 0) { | |
4214 | /* If Master/Slave config fault is asserted twice, | |
4215 | * we assume back-to-back */ | |
4216 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); | |
4217 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | |
4218 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); | |
4219 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; | |
4220 | e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); | |
4221 | if (phy_ctrl & CR_1000T_MS_ENABLE) { | |
4222 | phy_ctrl &= ~CR_1000T_MS_ENABLE; | |
4223 | e1000_write_phy_reg(hw, PHY_1000T_CTRL, | |
4224 | phy_ctrl); | |
4225 | adapter->smartspeed++; | |
4226 | if (!e1000_phy_setup_autoneg(hw) && | |
4227 | !e1000_read_phy_reg(hw, PHY_CTRL, | |
4228 | &phy_ctrl)) { | |
4229 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | |
4230 | MII_CR_RESTART_AUTO_NEG); | |
4231 | e1000_write_phy_reg(hw, PHY_CTRL, | |
4232 | phy_ctrl); | |
4233 | } | |
4234 | } | |
4235 | return; | |
4236 | } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { | |
4237 | /* If still no link, perhaps using 2/3 pair cable */ | |
4238 | e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); | |
4239 | phy_ctrl |= CR_1000T_MS_ENABLE; | |
4240 | e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); | |
4241 | if (!e1000_phy_setup_autoneg(hw) && | |
4242 | !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { | |
4243 | phy_ctrl |= (MII_CR_AUTO_NEG_EN | | |
4244 | MII_CR_RESTART_AUTO_NEG); | |
4245 | e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); | |
4246 | } | |
4247 | } | |
4248 | /* Restart process after E1000_SMARTSPEED_MAX iterations */ | |
4249 | if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) | |
4250 | adapter->smartspeed = 0; | |
4251 | } | |
4252 | ||
4253 | /** | |
4254 | * e1000_ioctl - | |
4255 | * @netdev: | |
4256 | * @ifreq: | |
4257 | * @cmd: | |
4258 | **/ | |
4259 | ||
4260 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | |
4261 | { | |
4262 | switch (cmd) { | |
4263 | case SIOCGMIIPHY: | |
4264 | case SIOCGMIIREG: | |
4265 | case SIOCSMIIREG: | |
4266 | return e1000_mii_ioctl(netdev, ifr, cmd); | |
4267 | default: | |
4268 | return -EOPNOTSUPP; | |
4269 | } | |
4270 | } | |
4271 | ||
4272 | /** | |
4273 | * e1000_mii_ioctl - | |
4274 | * @netdev: | |
4275 | * @ifreq: | |
4276 | * @cmd: | |
4277 | **/ | |
4278 | ||
4279 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, | |
4280 | int cmd) | |
4281 | { | |
4282 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4283 | struct e1000_hw *hw = &adapter->hw; | |
4284 | struct mii_ioctl_data *data = if_mii(ifr); | |
4285 | int retval; | |
4286 | u16 mii_reg; | |
4287 | u16 spddplx; | |
4288 | unsigned long flags; | |
4289 | ||
4290 | if (hw->media_type != e1000_media_type_copper) | |
4291 | return -EOPNOTSUPP; | |
4292 | ||
4293 | switch (cmd) { | |
4294 | case SIOCGMIIPHY: | |
4295 | data->phy_id = hw->phy_addr; | |
4296 | break; | |
4297 | case SIOCGMIIREG: | |
4298 | if (!capable(CAP_NET_ADMIN)) | |
4299 | return -EPERM; | |
4300 | spin_lock_irqsave(&adapter->stats_lock, flags); | |
4301 | if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, | |
4302 | &data->val_out)) { | |
4303 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | |
4304 | return -EIO; | |
4305 | } | |
4306 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | |
4307 | break; | |
4308 | case SIOCSMIIREG: | |
4309 | if (!capable(CAP_NET_ADMIN)) | |
4310 | return -EPERM; | |
4311 | if (data->reg_num & ~(0x1F)) | |
4312 | return -EFAULT; | |
4313 | mii_reg = data->val_in; | |
4314 | spin_lock_irqsave(&adapter->stats_lock, flags); | |
4315 | if (e1000_write_phy_reg(hw, data->reg_num, | |
4316 | mii_reg)) { | |
4317 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | |
4318 | return -EIO; | |
4319 | } | |
4320 | spin_unlock_irqrestore(&adapter->stats_lock, flags); | |
4321 | if (hw->media_type == e1000_media_type_copper) { | |
4322 | switch (data->reg_num) { | |
4323 | case PHY_CTRL: | |
4324 | if (mii_reg & MII_CR_POWER_DOWN) | |
4325 | break; | |
4326 | if (mii_reg & MII_CR_AUTO_NEG_EN) { | |
4327 | hw->autoneg = 1; | |
4328 | hw->autoneg_advertised = 0x2F; | |
4329 | } else { | |
4330 | if (mii_reg & 0x40) | |
4331 | spddplx = SPEED_1000; | |
4332 | else if (mii_reg & 0x2000) | |
4333 | spddplx = SPEED_100; | |
4334 | else | |
4335 | spddplx = SPEED_10; | |
4336 | spddplx += (mii_reg & 0x100) | |
4337 | ? DUPLEX_FULL : | |
4338 | DUPLEX_HALF; | |
4339 | retval = e1000_set_spd_dplx(adapter, | |
4340 | spddplx); | |
4341 | if (retval) | |
4342 | return retval; | |
4343 | } | |
4344 | if (netif_running(adapter->netdev)) | |
4345 | e1000_reinit_locked(adapter); | |
4346 | else | |
4347 | e1000_reset(adapter); | |
4348 | break; | |
4349 | case M88E1000_PHY_SPEC_CTRL: | |
4350 | case M88E1000_EXT_PHY_SPEC_CTRL: | |
4351 | if (e1000_phy_reset(hw)) | |
4352 | return -EIO; | |
4353 | break; | |
4354 | } | |
4355 | } else { | |
4356 | switch (data->reg_num) { | |
4357 | case PHY_CTRL: | |
4358 | if (mii_reg & MII_CR_POWER_DOWN) | |
4359 | break; | |
4360 | if (netif_running(adapter->netdev)) | |
4361 | e1000_reinit_locked(adapter); | |
4362 | else | |
4363 | e1000_reset(adapter); | |
4364 | break; | |
4365 | } | |
4366 | } | |
4367 | break; | |
4368 | default: | |
4369 | return -EOPNOTSUPP; | |
4370 | } | |
4371 | return E1000_SUCCESS; | |
4372 | } | |
4373 | ||
4374 | void e1000_pci_set_mwi(struct e1000_hw *hw) | |
4375 | { | |
4376 | struct e1000_adapter *adapter = hw->back; | |
4377 | int ret_val = pci_set_mwi(adapter->pdev); | |
4378 | ||
4379 | if (ret_val) | |
4380 | DPRINTK(PROBE, ERR, "Error in setting MWI\n"); | |
4381 | } | |
4382 | ||
4383 | void e1000_pci_clear_mwi(struct e1000_hw *hw) | |
4384 | { | |
4385 | struct e1000_adapter *adapter = hw->back; | |
4386 | ||
4387 | pci_clear_mwi(adapter->pdev); | |
4388 | } | |
4389 | ||
4390 | int e1000_pcix_get_mmrbc(struct e1000_hw *hw) | |
4391 | { | |
4392 | struct e1000_adapter *adapter = hw->back; | |
4393 | return pcix_get_mmrbc(adapter->pdev); | |
4394 | } | |
4395 | ||
4396 | void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) | |
4397 | { | |
4398 | struct e1000_adapter *adapter = hw->back; | |
4399 | pcix_set_mmrbc(adapter->pdev, mmrbc); | |
4400 | } | |
4401 | ||
4402 | s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) | |
4403 | { | |
4404 | struct e1000_adapter *adapter = hw->back; | |
4405 | u16 cap_offset; | |
4406 | ||
4407 | cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); | |
4408 | if (!cap_offset) | |
4409 | return -E1000_ERR_CONFIG; | |
4410 | ||
4411 | pci_read_config_word(adapter->pdev, cap_offset + reg, value); | |
4412 | ||
4413 | return E1000_SUCCESS; | |
4414 | } | |
4415 | ||
4416 | void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) | |
4417 | { | |
4418 | outl(value, port); | |
4419 | } | |
4420 | ||
4421 | static void e1000_vlan_rx_register(struct net_device *netdev, | |
4422 | struct vlan_group *grp) | |
4423 | { | |
4424 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4425 | struct e1000_hw *hw = &adapter->hw; | |
4426 | u32 ctrl, rctl; | |
4427 | ||
4428 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | |
4429 | e1000_irq_disable(adapter); | |
4430 | adapter->vlgrp = grp; | |
4431 | ||
4432 | if (grp) { | |
4433 | /* enable VLAN tag insert/strip */ | |
4434 | ctrl = er32(CTRL); | |
4435 | ctrl |= E1000_CTRL_VME; | |
4436 | ew32(CTRL, ctrl); | |
4437 | ||
4438 | if (adapter->hw.mac_type != e1000_ich8lan) { | |
4439 | /* enable VLAN receive filtering */ | |
4440 | rctl = er32(RCTL); | |
4441 | rctl &= ~E1000_RCTL_CFIEN; | |
4442 | ew32(RCTL, rctl); | |
4443 | e1000_update_mng_vlan(adapter); | |
4444 | } | |
4445 | } else { | |
4446 | /* disable VLAN tag insert/strip */ | |
4447 | ctrl = er32(CTRL); | |
4448 | ctrl &= ~E1000_CTRL_VME; | |
4449 | ew32(CTRL, ctrl); | |
4450 | ||
4451 | if (adapter->hw.mac_type != e1000_ich8lan) { | |
4452 | if (adapter->mng_vlan_id != | |
4453 | (u16)E1000_MNG_VLAN_NONE) { | |
4454 | e1000_vlan_rx_kill_vid(netdev, | |
4455 | adapter->mng_vlan_id); | |
4456 | adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; | |
4457 | } | |
4458 | } | |
4459 | } | |
4460 | ||
4461 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | |
4462 | e1000_irq_enable(adapter); | |
4463 | } | |
4464 | ||
4465 | static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |
4466 | { | |
4467 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4468 | struct e1000_hw *hw = &adapter->hw; | |
4469 | u32 vfta, index; | |
4470 | ||
4471 | if ((hw->mng_cookie.status & | |
4472 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | |
4473 | (vid == adapter->mng_vlan_id)) | |
4474 | return; | |
4475 | /* add VID to filter table */ | |
4476 | index = (vid >> 5) & 0x7F; | |
4477 | vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); | |
4478 | vfta |= (1 << (vid & 0x1F)); | |
4479 | e1000_write_vfta(hw, index, vfta); | |
4480 | } | |
4481 | ||
4482 | static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |
4483 | { | |
4484 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4485 | struct e1000_hw *hw = &adapter->hw; | |
4486 | u32 vfta, index; | |
4487 | ||
4488 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | |
4489 | e1000_irq_disable(adapter); | |
4490 | vlan_group_set_device(adapter->vlgrp, vid, NULL); | |
4491 | if (!test_bit(__E1000_DOWN, &adapter->flags)) | |
4492 | e1000_irq_enable(adapter); | |
4493 | ||
4494 | if ((hw->mng_cookie.status & | |
4495 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && | |
4496 | (vid == adapter->mng_vlan_id)) { | |
4497 | /* release control to f/w */ | |
4498 | e1000_release_hw_control(adapter); | |
4499 | return; | |
4500 | } | |
4501 | ||
4502 | /* remove VID from filter table */ | |
4503 | index = (vid >> 5) & 0x7F; | |
4504 | vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); | |
4505 | vfta &= ~(1 << (vid & 0x1F)); | |
4506 | e1000_write_vfta(hw, index, vfta); | |
4507 | } | |
4508 | ||
4509 | static void e1000_restore_vlan(struct e1000_adapter *adapter) | |
4510 | { | |
4511 | e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); | |
4512 | ||
4513 | if (adapter->vlgrp) { | |
4514 | u16 vid; | |
4515 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | |
4516 | if (!vlan_group_get_device(adapter->vlgrp, vid)) | |
4517 | continue; | |
4518 | e1000_vlan_rx_add_vid(adapter->netdev, vid); | |
4519 | } | |
4520 | } | |
4521 | } | |
4522 | ||
4523 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) | |
4524 | { | |
4525 | struct e1000_hw *hw = &adapter->hw; | |
4526 | ||
4527 | hw->autoneg = 0; | |
4528 | ||
4529 | /* Fiber NICs only allow 1000 gbps Full duplex */ | |
4530 | if ((hw->media_type == e1000_media_type_fiber) && | |
4531 | spddplx != (SPEED_1000 + DUPLEX_FULL)) { | |
4532 | DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); | |
4533 | return -EINVAL; | |
4534 | } | |
4535 | ||
4536 | switch (spddplx) { | |
4537 | case SPEED_10 + DUPLEX_HALF: | |
4538 | hw->forced_speed_duplex = e1000_10_half; | |
4539 | break; | |
4540 | case SPEED_10 + DUPLEX_FULL: | |
4541 | hw->forced_speed_duplex = e1000_10_full; | |
4542 | break; | |
4543 | case SPEED_100 + DUPLEX_HALF: | |
4544 | hw->forced_speed_duplex = e1000_100_half; | |
4545 | break; | |
4546 | case SPEED_100 + DUPLEX_FULL: | |
4547 | hw->forced_speed_duplex = e1000_100_full; | |
4548 | break; | |
4549 | case SPEED_1000 + DUPLEX_FULL: | |
4550 | hw->autoneg = 1; | |
4551 | hw->autoneg_advertised = ADVERTISE_1000_FULL; | |
4552 | break; | |
4553 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ | |
4554 | default: | |
4555 | DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); | |
4556 | return -EINVAL; | |
4557 | } | |
4558 | return 0; | |
4559 | } | |
4560 | ||
4561 | static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |
4562 | { | |
4563 | struct net_device *netdev = pci_get_drvdata(pdev); | |
4564 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4565 | struct e1000_hw *hw = &adapter->hw; | |
4566 | u32 ctrl, ctrl_ext, rctl, status; | |
4567 | u32 wufc = adapter->wol; | |
4568 | #ifdef CONFIG_PM | |
4569 | int retval = 0; | |
4570 | #endif | |
4571 | ||
4572 | netif_device_detach(netdev); | |
4573 | ||
4574 | if (netif_running(netdev)) { | |
4575 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); | |
4576 | e1000_down(adapter); | |
4577 | } | |
4578 | ||
4579 | #ifdef CONFIG_PM | |
4580 | retval = pci_save_state(pdev); | |
4581 | if (retval) | |
4582 | return retval; | |
4583 | #endif | |
4584 | ||
4585 | status = er32(STATUS); | |
4586 | if (status & E1000_STATUS_LU) | |
4587 | wufc &= ~E1000_WUFC_LNKC; | |
4588 | ||
4589 | if (wufc) { | |
4590 | e1000_setup_rctl(adapter); | |
4591 | e1000_set_rx_mode(netdev); | |
4592 | ||
4593 | /* turn on all-multi mode if wake on multicast is enabled */ | |
4594 | if (wufc & E1000_WUFC_MC) { | |
4595 | rctl = er32(RCTL); | |
4596 | rctl |= E1000_RCTL_MPE; | |
4597 | ew32(RCTL, rctl); | |
4598 | } | |
4599 | ||
4600 | if (hw->mac_type >= e1000_82540) { | |
4601 | ctrl = er32(CTRL); | |
4602 | /* advertise wake from D3Cold */ | |
4603 | #define E1000_CTRL_ADVD3WUC 0x00100000 | |
4604 | /* phy power management enable */ | |
4605 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 | |
4606 | ctrl |= E1000_CTRL_ADVD3WUC | | |
4607 | E1000_CTRL_EN_PHY_PWR_MGMT; | |
4608 | ew32(CTRL, ctrl); | |
4609 | } | |
4610 | ||
4611 | if (hw->media_type == e1000_media_type_fiber || | |
4612 | hw->media_type == e1000_media_type_internal_serdes) { | |
4613 | /* keep the laser running in D3 */ | |
4614 | ctrl_ext = er32(CTRL_EXT); | |
4615 | ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; | |
4616 | ew32(CTRL_EXT, ctrl_ext); | |
4617 | } | |
4618 | ||
4619 | /* Allow time for pending master requests to run */ | |
4620 | e1000_disable_pciex_master(hw); | |
4621 | ||
4622 | ew32(WUC, E1000_WUC_PME_EN); | |
4623 | ew32(WUFC, wufc); | |
4624 | pci_enable_wake(pdev, PCI_D3hot, 1); | |
4625 | pci_enable_wake(pdev, PCI_D3cold, 1); | |
4626 | } else { | |
4627 | ew32(WUC, 0); | |
4628 | ew32(WUFC, 0); | |
4629 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
4630 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
4631 | } | |
4632 | ||
4633 | e1000_release_manageability(adapter); | |
4634 | ||
4635 | /* make sure adapter isn't asleep if manageability is enabled */ | |
4636 | if (adapter->en_mng_pt) { | |
4637 | pci_enable_wake(pdev, PCI_D3hot, 1); | |
4638 | pci_enable_wake(pdev, PCI_D3cold, 1); | |
4639 | } | |
4640 | ||
4641 | if (hw->phy_type == e1000_phy_igp_3) | |
4642 | e1000_phy_powerdown_workaround(hw); | |
4643 | ||
4644 | if (netif_running(netdev)) | |
4645 | e1000_free_irq(adapter); | |
4646 | ||
4647 | /* Release control of h/w to f/w. If f/w is AMT enabled, this | |
4648 | * would have already happened in close and is redundant. */ | |
4649 | e1000_release_hw_control(adapter); | |
4650 | ||
4651 | pci_disable_device(pdev); | |
4652 | ||
4653 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
4654 | ||
4655 | return 0; | |
4656 | } | |
4657 | ||
4658 | #ifdef CONFIG_PM | |
4659 | static int e1000_resume(struct pci_dev *pdev) | |
4660 | { | |
4661 | struct net_device *netdev = pci_get_drvdata(pdev); | |
4662 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4663 | struct e1000_hw *hw = &adapter->hw; | |
4664 | u32 err; | |
4665 | ||
4666 | pci_set_power_state(pdev, PCI_D0); | |
4667 | pci_restore_state(pdev); | |
4668 | ||
4669 | if (adapter->need_ioport) | |
4670 | err = pci_enable_device(pdev); | |
4671 | else | |
4672 | err = pci_enable_device_mem(pdev); | |
4673 | if (err) { | |
4674 | printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n"); | |
4675 | return err; | |
4676 | } | |
4677 | pci_set_master(pdev); | |
4678 | ||
4679 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
4680 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
4681 | ||
4682 | if (netif_running(netdev)) { | |
4683 | err = e1000_request_irq(adapter); | |
4684 | if (err) | |
4685 | return err; | |
4686 | } | |
4687 | ||
4688 | e1000_power_up_phy(adapter); | |
4689 | e1000_reset(adapter); | |
4690 | ew32(WUS, ~0); | |
4691 | ||
4692 | e1000_init_manageability(adapter); | |
4693 | ||
4694 | if (netif_running(netdev)) | |
4695 | e1000_up(adapter); | |
4696 | ||
4697 | netif_device_attach(netdev); | |
4698 | ||
4699 | /* If the controller is 82573 and f/w is AMT, do not set | |
4700 | * DRV_LOAD until the interface is up. For all other cases, | |
4701 | * let the f/w know that the h/w is now under the control | |
4702 | * of the driver. */ | |
4703 | if (hw->mac_type != e1000_82573 || | |
4704 | !e1000_check_mng_mode(hw)) | |
4705 | e1000_get_hw_control(adapter); | |
4706 | ||
4707 | return 0; | |
4708 | } | |
4709 | #endif | |
4710 | ||
4711 | static void e1000_shutdown(struct pci_dev *pdev) | |
4712 | { | |
4713 | e1000_suspend(pdev, PMSG_SUSPEND); | |
4714 | } | |
4715 | ||
4716 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
4717 | /* | |
4718 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
4719 | * without having to re-enable interrupts. It's not called while | |
4720 | * the interrupt routine is executing. | |
4721 | */ | |
4722 | static void e1000_netpoll(struct net_device *netdev) | |
4723 | { | |
4724 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4725 | ||
4726 | disable_irq(adapter->pdev->irq); | |
4727 | e1000_intr(adapter->pdev->irq, netdev); | |
4728 | enable_irq(adapter->pdev->irq); | |
4729 | } | |
4730 | #endif | |
4731 | ||
4732 | /** | |
4733 | * e1000_io_error_detected - called when PCI error is detected | |
4734 | * @pdev: Pointer to PCI device | |
4735 | * @state: The current pci conneection state | |
4736 | * | |
4737 | * This function is called after a PCI bus error affecting | |
4738 | * this device has been detected. | |
4739 | */ | |
4740 | static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, | |
4741 | pci_channel_state_t state) | |
4742 | { | |
4743 | struct net_device *netdev = pci_get_drvdata(pdev); | |
4744 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4745 | ||
4746 | netif_device_detach(netdev); | |
4747 | ||
4748 | if (netif_running(netdev)) | |
4749 | e1000_down(adapter); | |
4750 | pci_disable_device(pdev); | |
4751 | ||
4752 | /* Request a slot slot reset. */ | |
4753 | return PCI_ERS_RESULT_NEED_RESET; | |
4754 | } | |
4755 | ||
4756 | /** | |
4757 | * e1000_io_slot_reset - called after the pci bus has been reset. | |
4758 | * @pdev: Pointer to PCI device | |
4759 | * | |
4760 | * Restart the card from scratch, as if from a cold-boot. Implementation | |
4761 | * resembles the first-half of the e1000_resume routine. | |
4762 | */ | |
4763 | static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) | |
4764 | { | |
4765 | struct net_device *netdev = pci_get_drvdata(pdev); | |
4766 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4767 | struct e1000_hw *hw = &adapter->hw; | |
4768 | int err; | |
4769 | ||
4770 | if (adapter->need_ioport) | |
4771 | err = pci_enable_device(pdev); | |
4772 | else | |
4773 | err = pci_enable_device_mem(pdev); | |
4774 | if (err) { | |
4775 | printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n"); | |
4776 | return PCI_ERS_RESULT_DISCONNECT; | |
4777 | } | |
4778 | pci_set_master(pdev); | |
4779 | ||
4780 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
4781 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
4782 | ||
4783 | e1000_reset(adapter); | |
4784 | ew32(WUS, ~0); | |
4785 | ||
4786 | return PCI_ERS_RESULT_RECOVERED; | |
4787 | } | |
4788 | ||
4789 | /** | |
4790 | * e1000_io_resume - called when traffic can start flowing again. | |
4791 | * @pdev: Pointer to PCI device | |
4792 | * | |
4793 | * This callback is called when the error recovery driver tells us that | |
4794 | * its OK to resume normal operation. Implementation resembles the | |
4795 | * second-half of the e1000_resume routine. | |
4796 | */ | |
4797 | static void e1000_io_resume(struct pci_dev *pdev) | |
4798 | { | |
4799 | struct net_device *netdev = pci_get_drvdata(pdev); | |
4800 | struct e1000_adapter *adapter = netdev_priv(netdev); | |
4801 | struct e1000_hw *hw = &adapter->hw; | |
4802 | ||
4803 | e1000_init_manageability(adapter); | |
4804 | ||
4805 | if (netif_running(netdev)) { | |
4806 | if (e1000_up(adapter)) { | |
4807 | printk("e1000: can't bring device back up after reset\n"); | |
4808 | return; | |
4809 | } | |
4810 | } | |
4811 | ||
4812 | netif_device_attach(netdev); | |
4813 | ||
4814 | /* If the controller is 82573 and f/w is AMT, do not set | |
4815 | * DRV_LOAD until the interface is up. For all other cases, | |
4816 | * let the f/w know that the h/w is now under the control | |
4817 | * of the driver. */ | |
4818 | if (hw->mac_type != e1000_82573 || | |
4819 | !e1000_check_mng_mode(hw)) | |
4820 | e1000_get_hw_control(adapter); | |
4821 | ||
4822 | } | |
4823 | ||
4824 | /* e1000_main.c */ |