]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/e100.c
myri10ge: update firmware headers
[mirror_ubuntu-artful-kernel.git] / drivers / net / e100.c
CommitLineData
1da177e4
LT
1/*******************************************************************************
2
0abb6eb1
AK
3 Intel PRO/100 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
05479938
JB
5
6 This program is free software; you can redistribute it and/or modify it
0abb6eb1
AK
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
05479938 9
0abb6eb1 10 This program is distributed in the hope it will be useful, but WITHOUT
05479938
JB
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
1da177e4 13 more details.
05479938 14
1da177e4 15 You should have received a copy of the GNU General Public License along with
0abb6eb1
AK
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
05479938 18
0abb6eb1
AK
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
05479938 21
1da177e4
LT
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
0abb6eb1 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
1da177e4
LT
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29/*
30 * e100.c: Intel(R) PRO/100 ethernet driver
31 *
32 * (Re)written 2003 by scott.feldman@intel.com. Based loosely on
33 * original e100 driver, but better described as a munging of
34 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
35 *
36 * References:
37 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
38 * Open Source Software Developers Manual,
39 * http://sourceforge.net/projects/e1000
40 *
41 *
42 * Theory of Operation
43 *
44 * I. General
45 *
46 * The driver supports Intel(R) 10/100 Mbps PCI Fast Ethernet
47 * controller family, which includes the 82557, 82558, 82559, 82550,
48 * 82551, and 82562 devices. 82558 and greater controllers
49 * integrate the Intel 82555 PHY. The controllers are used in
50 * server and client network interface cards, as well as in
51 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
52 * configurations. 8255x supports a 32-bit linear addressing
53 * mode and operates at 33Mhz PCI clock rate.
54 *
55 * II. Driver Operation
56 *
57 * Memory-mapped mode is used exclusively to access the device's
58 * shared-memory structure, the Control/Status Registers (CSR). All
59 * setup, configuration, and control of the device, including queuing
60 * of Tx, Rx, and configuration commands is through the CSR.
61 * cmd_lock serializes accesses to the CSR command register. cb_lock
62 * protects the shared Command Block List (CBL).
63 *
64 * 8255x is highly MII-compliant and all access to the PHY go
65 * through the Management Data Interface (MDI). Consequently, the
66 * driver leverages the mii.c library shared with other MII-compliant
67 * devices.
68 *
69 * Big- and Little-Endian byte order as well as 32- and 64-bit
70 * archs are supported. Weak-ordered memory and non-cache-coherent
71 * archs are supported.
72 *
73 * III. Transmit
74 *
75 * A Tx skb is mapped and hangs off of a TCB. TCBs are linked
76 * together in a fixed-size ring (CBL) thus forming the flexible mode
77 * memory structure. A TCB marked with the suspend-bit indicates
78 * the end of the ring. The last TCB processed suspends the
79 * controller, and the controller can be restarted by issue a CU
80 * resume command to continue from the suspend point, or a CU start
81 * command to start at a given position in the ring.
82 *
83 * Non-Tx commands (config, multicast setup, etc) are linked
84 * into the CBL ring along with Tx commands. The common structure
85 * used for both Tx and non-Tx commands is the Command Block (CB).
86 *
87 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
88 * is the next CB to check for completion; cb_to_send is the first
89 * CB to start on in case of a previous failure to resume. CB clean
90 * up happens in interrupt context in response to a CU interrupt.
91 * cbs_avail keeps track of number of free CB resources available.
92 *
93 * Hardware padding of short packets to minimum packet size is
94 * enabled. 82557 pads with 7Eh, while the later controllers pad
95 * with 00h.
96 *
97 * IV. Recieve
98 *
99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
100 * Descriptors (RFD) + data buffer, thus forming the simplified mode
101 * memory structure. Rx skbs are allocated to contain both the RFD
102 * and the data buffer, but the RFD is pulled off before the skb is
103 * indicated. The data buffer is aligned such that encapsulated
104 * protocol headers are u32-aligned. Since the RFD is part of the
105 * mapped shared memory, and completion status is contained within
106 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
107 * view from software and hardware.
108 *
109 * Under typical operation, the receive unit (RU) is start once,
110 * and the controller happily fills RFDs as frames arrive. If
111 * replacement RFDs cannot be allocated, or the RU goes non-active,
112 * the RU must be restarted. Frame arrival generates an interrupt,
113 * and Rx indication and re-allocation happen in the same context,
114 * therefore no locking is required. A software-generated interrupt
115 * is generated from the watchdog to recover from a failed allocation
116 * senario where all Rx resources have been indicated and none re-
117 * placed.
118 *
119 * V. Miscellaneous
120 *
121 * VLAN offloading of tagging, stripping and filtering is not
122 * supported, but driver will accommodate the extra 4-byte VLAN tag
123 * for processing by upper layers. Tx/Rx Checksum offloading is not
124 * supported. Tx Scatter/Gather is not supported. Jumbo Frames is
125 * not supported (hardware limitation).
126 *
127 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
128 *
129 * Thanks to JC (jchapman@katalix.com) for helping with
130 * testing/troubleshooting the development driver.
131 *
132 * TODO:
133 * o several entry points race with dev->close
134 * o check for tx-no-resources/stop Q races with tx clean/wake Q
ac7c6669
OM
135 *
136 * FIXES:
137 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
138 * - Stratus87247: protect MDI control register manipulations
1da177e4
LT
139 */
140
1da177e4
LT
141#include <linux/module.h>
142#include <linux/moduleparam.h>
143#include <linux/kernel.h>
144#include <linux/types.h>
145#include <linux/slab.h>
146#include <linux/delay.h>
147#include <linux/init.h>
148#include <linux/pci.h>
1e7f0bd8 149#include <linux/dma-mapping.h>
1da177e4
LT
150#include <linux/netdevice.h>
151#include <linux/etherdevice.h>
152#include <linux/mii.h>
153#include <linux/if_vlan.h>
154#include <linux/skbuff.h>
155#include <linux/ethtool.h>
156#include <linux/string.h>
157#include <asm/unaligned.h>
158
159
/* Driver identity strings and module-wide tunables. */
#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.23-k4"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
#define PFX			DRV_NAME ": "

#define E100_WATCHDOG_PERIOD	(2 * HZ)	/* watchdog timer interval */
#define E100_NAPI_WEIGHT	16		/* max RX frames per NAPI poll */

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = 3;			/* default message level bitmap index */
static int eeprom_bad_csum_allow = 0;	/* if set, probe continues on bad EEPROM csum */
static int use_io = 0;			/* if set, force port I/O instead of MMIO */
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");

/* Conditional debug printk: only emits when the NETIF_MSG_##nlevel bit
 * is set in nic->msg_enable; requires a 'nic' in the calling scope. */
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
		__FUNCTION__ , ## args))
188
/* PCI match entry for an 8255x device; 'ich' is a per-device quirk level
 * stashed in driver_data (nonzero marks ICH-integrated parts). */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
/* All supported 8255x-family PCI device IDs. */
static struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
237
/* MAC type, encoded as the PCI revision ID (see e100_get_defaults). */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

/* PHY identifier (composed from MII PHY ID registers). */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_unknown  = 0xFFFFFFFF,
};
265
/* CSR (Control/Status Registers) - memory-mapped device register layout;
 * field order/sizes mirror the 8255x datasheet and must not change. */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;	/* write-1-to-ack interrupt status bits */
		u8 cmd_lo;	/* CU/RU command opcode */
		u8 cmd_hi;	/* interrupt mask control */
		u32 gen_ptr;	/* general pointer (DMA address operand) */
	} scb;			/* System Control Block */
	u32 port;		/* reset/self-test port */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* bit-banged EEPROM interface */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MII management (MDI) interface */
	u32 rx_dma_count;
};

/* Receive-unit status bits in scb.status. */
enum scb_status {
	rus_ready = 0x10,
	rus_mask  = 0x3C,
};

/* Software-tracked state of the receive unit. */
enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING	 = 1,
	RU_UNINITIALIZED = -1,
};

/* Interrupt cause bits in scb.stat_ack. */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,	/* RU not ready */
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};
305
/* Interrupt mask commands written to scb.cmd_hi. */
enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,	/* trigger a software interrupt */
};

/* CU (command unit) and RU (receive unit) opcodes for scb.cmd_lo. */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

/* Completion codes written by hw after a stats dump/dump+reset. */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

/* Reset operations written to the port register. */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

/* Bit-bang lines in eeprom_ctrl_lo. */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* serial clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to eeprom) */
	eedo = 0x08,	/* data out (from eeprom) */
};

/* MDI control register command/status bits. */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

/* Serial EEPROM opcodes. */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,	/* write/erase disable */
	op_ewen  = 0x13,	/* write/erase enable */
};
354
/* Word offsets of interesting fields in the EEPROM image. */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

/* Bit in eeprom_cnfg_mdix word. */
enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

/* Bit in eeprom_id word. */
enum eeprom_id {
	eeprom_id_wol = 0x0020,	/* Wake-on-LAN capable */
};

/* Bits in eeprom_config_asf word. */
enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

/* Command block completion status bits. */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/* Command block opcodes and control bits. */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,	/* set individual (MAC) address */
	cb_config = 0x0002,
	cb_multi  = 0x0003,	/* multicast list setup */
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,	/* microcode load */
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,	/* simplified-mode TX flag */
	cb_cid    = 0x1f00,	/* CNA interrupt delay */
	cb_i      = 0x2000,	/* interrupt on completion */
	cb_s      = 0x4000,	/* suspend after this CB */
	cb_el     = 0x8000,	/* end of list */
};
394
/* Receive Frame Descriptor - shared with hardware; layout is fixed. */
struct rfd {
	u16 status;
	u16 command;
	u32 link;		/* bus address of next RFD */
	u32 rbd;		/* unused in simplified mode (0xFFFFFFFF) */
	u16 actual_size;	/* bytes actually received */
	u16 size;		/* buffer capacity */
};

/* Software bookkeeping for one RX slot (doubly-linked ring). */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;	/* skb holding the RFD + data buffer */
	dma_addr_t dma_addr;	/* mapped address of the RFD */
};
409
/* X(a,b) declares two adjacent bitfields in hardware bit order regardless
 * of host bitfield endianness; struct config below must match the 8255x
 * configure command layout byte-for-byte.  Do not reorder fields. */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_discard_overruns:1), rx_save_bad_frames:1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};
453
#define E100_MAX_MULTICAST_ADDRS	64
/* Payload of a multicast-setup command block. */
struct multi {
	u16 count;	/* bytes of valid address data that follow */
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
/* Command Block - the hardware-visible header (status..u) is shared with
 * the device; next/prev/dma_addr/skb are driver-only bookkeeping. */
struct cb {
	u16 status;
	u16 command;
	u32 link;	/* bus address of next CB */
	union {
		u8 iaaddr[ETH_ALEN];	/* cb_iaaddr payload */
		u32 ucode[UCODE_SIZE];	/* cb_ucode payload */
		struct config config;	/* cb_config payload */
		struct multi multi;	/* cb_multi payload */
		struct {		/* cb_tx payload (TCB) */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				u32 buf_addr;
				u16 size;
				u16 eol;
			} tbd;
		} tcb;
		u32 dump_buffer_addr;	/* cb_dump payload */
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;	/* TX skb owned by this CB, if any */
};
488
/* Loopback mode selected via ethtool/self-test. */
enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

/* Statistics dump area filled by the cuc_dump_* commands; layout and
 * field order match the hardware's dump format. */
struct stats {
	u32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	u32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	u32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	u16 xmt_tco_frames, rcv_tco_frames;
	u32 complete;	/* set to a cuc_dump completion code by hw */
};
504
/* Coherent DMA scratch area shared with the device. */
struct mem {
	struct {
		u32 signature;	/* written nonzero by hw when self-test ran */
		u32 result;	/* 0 on pass; nonzero bits describe failure */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

/* Bounds and current value for a tunable ring size. */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;	/* RX ring sizing */
	struct param_range cbs;		/* TX/command ring sizing */
};
524
/* Per-adapter driver state.  The leading groups are deliberately packed
 * and cacheline-aligned for hot-path locality - keep field order. */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;		/* next RX slot to (re)fill */
	struct rx *rx_to_clean;		/* next RX slot to check for a frame */
	struct rfd blank_rfd;		/* template copied into fresh RFDs */
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;		/* serializes SCB command register access */
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;	/* cuc_start or cuc_resume */
	unsigned int cbs_avail;		/* free CBs remaining in the ring */
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;		/* next CB to queue a command into */
	struct cb *cb_to_send;		/* first CB to (re)start hw on */
	struct cb *cb_to_clean;		/* next CB to check for completion */
	u16 tx_command;			/* precomputed command word for TX CBs */
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct timer_list blink_timer;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;		/* coherent DMA scratch area */
	dma_addr_t dma_addr;		/* bus address of *mem */

	dma_addr_t cbs_dma_addr;	/* bus address of the CB ring */
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_over_length_errors;

	u16 leds;
	u16 eeprom_wc;			/* EEPROM size in 16-bit words */
	u16 eeprom[256];		/* cached EEPROM image */
	spinlock_t mdio_lock;		/* Stratus87247: guards MDI register sequence */
};
591
/* Force previously posted MMIO writes out to the device before the
 * caller proceeds (e.g. before a udelay that assumes the write landed). */
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}
598
/* Unmask the device's interrupt line, under cmd_lock since scb.cmd_hi
 * shares the SCB command register area with command issuance. */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);	/* make sure the unmask reaches the device */
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
608
/* Mask all interrupts from the device; counterpart of e100_enable_irq. */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);	/* make sure the mask reaches the device */
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
618
/* Fully reset the controller: selective reset first to quiesce DMA,
 * then a software reset, then re-mask interrupts (reset unmasks them). */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
633
/* Run the controller's built-in self-test, which DMAs its result into
 * nic->mem->selftest.  Returns 0 on pass, -ETIMEDOUT on failure or if
 * the device never wrote back (signature still 0). */
static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if(nic->mem->selftest.result != 0) {
		DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n",
			nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if(nic->mem->selftest.signature == 0) {
		DPRINTK(HW, ERR, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
665
666static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, u16 data)
667{
668 u32 cmd_addr_data[3];
669 u8 ctrl;
670 int i, j;
671
672 /* Three cmds: write/erase enable, write data, write/erase disable */
673 cmd_addr_data[0] = op_ewen << (addr_len - 2);
674 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
675 cpu_to_le16(data);
676 cmd_addr_data[2] = op_ewds << (addr_len - 2);
677
678 /* Bit-bang cmds to write word to eeprom */
679 for(j = 0; j < 3; j++) {
680
681 /* Chip select */
27345bb6 682 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
683 e100_write_flush(nic); udelay(4);
684
685 for(i = 31; i >= 0; i--) {
686 ctrl = (cmd_addr_data[j] & (1 << i)) ?
687 eecs | eedi : eecs;
27345bb6 688 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
689 e100_write_flush(nic); udelay(4);
690
27345bb6 691 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
692 e100_write_flush(nic); udelay(4);
693 }
694 /* Wait 10 msec for cmd to complete */
695 msleep(10);
696
697 /* Chip deselect */
27345bb6 698 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
699 e100_write_flush(nic); udelay(4);
700 }
701};
702
703/* General technique stolen from the eepro100 driver - very clever */
704static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
705{
706 u32 cmd_addr_data;
707 u16 data = 0;
708 u8 ctrl;
709 int i;
710
711 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
712
713 /* Chip select */
27345bb6 714 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
715 e100_write_flush(nic); udelay(4);
716
717 /* Bit-bang to read word from eeprom */
718 for(i = 31; i >= 0; i--) {
719 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
27345bb6 720 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
1da177e4 721 e100_write_flush(nic); udelay(4);
05479938 722
27345bb6 723 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
1da177e4 724 e100_write_flush(nic); udelay(4);
05479938 725
1da177e4
LT
726 /* Eeprom drives a dummy zero to EEDO after receiving
727 * complete address. Use this to adjust addr_len. */
27345bb6 728 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
1da177e4
LT
729 if(!(ctrl & eedo) && i > 16) {
730 *addr_len -= (i - 16);
731 i = 17;
732 }
05479938 733
1da177e4
LT
734 data = (data << 1) | (ctrl & eedo ? 1 : 0);
735 }
736
737 /* Chip deselect */
27345bb6 738 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
1da177e4
LT
739 e100_write_flush(nic); udelay(4);
740
741 return le16_to_cpu(data);
742};
743
/* Load entire EEPROM image into driver cache and validate checksum */
/* Returns 0 on success; -EAGAIN on checksum mismatch unless the
 * eeprom_bad_csum_allow module parameter overrides it.
 * NOTE(review): the cpu_to_le16/le16_to_cpu calls here operate on
 * host-order values read back bit-by-bit - likely endian-annotation
 * noise; confirm behavior on big-endian before changing. */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;	/* word count = 2^addr_len */

	for(addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if(addr < nic->eeprom_wc - 1)	/* last word is the checksum itself */
			checksum += cpu_to_le16(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	checksum = le16_to_cpu(0xBABA - checksum);
	if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
770
/* Save (portion of) driver EEPROM cache to device and update checksum */
/* Writes words [start, start+count) from nic->eeprom back to the device,
 * then recomputes and writes the checksum word (last word; words must
 * sum to 0xBABA).  Returns -EINVAL if the range reaches the checksum. */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if(start + count >= nic->eeprom_wc)
		return -EINVAL;

	for(addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for(addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += cpu_to_le16(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = le16_to_cpu(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
796
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
/* Issue one SCB command to the device under cmd_lock: wait for the
 * previous command to be accepted (cmd_lo reads back 0), then write the
 * DMA operand (skipped for cuc_resume, which takes none) and the opcode.
 * Returns 0 or -EAGAIN if the SCB never cleared. */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		/* spin fast for the first E100_WAIT_SCB_FAST iterations,
		 * then back off with udelay */
		if(unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if(unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
829
/* Queue a command into the CB ring and kick the CU.  cb_prepare fills in
 * the CB payload for the given skb (skb may be NULL for non-TX commands).
 * Returns 0, -ENOSPC if this consumed the last free CB, or -ENOMEM if
 * none were free.  Runs under cb_lock. */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if(unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if(unlikely(!nic->cbs_avail))
		err = -ENOSPC;	/* ring now full; caller should stop the queue */

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();	/* S-bit of the new CB must be visible before the old one clears */
	cb->prev->command &= cpu_to_le16(~cb_s);

	while(nic->cb_to_send != nic->cb_to_use) {
		if(unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky. It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if(err == -ENOSPC) {
				//request a reset
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
884
885static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
886{
887 u32 data_out = 0;
888 unsigned int i;
ac7c6669 889 unsigned long flags;
1da177e4 890
ac7c6669
OM
891
892 /*
893 * Stratus87247: we shouldn't be writing the MDI control
894 * register until the Ready bit shows True. Also, since
895 * manipulation of the MDI control registers is a multi-step
896 * procedure it should be done under lock.
897 */
898 spin_lock_irqsave(&nic->mdio_lock, flags);
899 for (i = 100; i; --i) {
27345bb6 900 if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
ac7c6669
OM
901 break;
902 udelay(20);
903 }
904 if (unlikely(!i)) {
905 printk("e100.mdio_ctrl(%s) won't go Ready\n",
906 nic->netdev->name );
907 spin_unlock_irqrestore(&nic->mdio_lock, flags);
908 return 0; /* No way to indicate timeout error */
909 }
27345bb6 910 iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
1da177e4 911
ac7c6669 912 for (i = 0; i < 100; i++) {
1da177e4 913 udelay(20);
27345bb6 914 if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
1da177e4
LT
915 break;
916 }
ac7c6669 917 spin_unlock_irqrestore(&nic->mdio_lock, flags);
1da177e4
LT
918 DPRINTK(HW, DEBUG,
919 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
920 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
921 return (u16)data_out;
922}
923
/* mii_if_info callback: read a PHY register via mdio_ctrl. */
static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	return mdio_ctrl(netdev_priv(netdev), addr, mdi_read, reg, 0);
}

/* mii_if_info callback: write a PHY register via mdio_ctrl. */
static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	mdio_ctrl(netdev_priv(netdev), addr, mdi_write, reg, data);
}
933
/* Initialize per-adapter defaults at probe time: ring-size parameters,
 * MAC type (from PCI revision), TX threshold/command template, the blank
 * RFD template, and the mii_if_info callbacks. */
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if(nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557*/
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = cpu_to_le16(cb_el);
	nic->blank_rfd.rbd = 0xFFFFFFFF;	/* no RBD: simplified mode */
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
966
/* Prepare a Configure command block that programs the controller's
 * operating parameters: FIFO thresholds, MII mode, flow control, frame
 * filtering, and WoL.  Called as a cb-prepare routine via e100_exec_cb();
 * @skb is unused.  Later assignments override the defaults set above
 * them, so ordering within this function matters. */
static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	/* Baseline defaults for all MAC revisions */
	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	config->mii_mode = 0x1;			/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	/* Runtime-tuned values (see e100_adjust_adaptive_ifs) */
	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if(nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if(nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if(nic->flags & multicast_all)
		config->multicast_all = 0x1;		/* 1=accept, 0=no */

	/* disable WoL when up */
	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	/* Feature set grows with newer MAC revisions */
	if(nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	/* Dump the first 24 raw config bytes for debugging */
	DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}
1044
2afecc04
JB
1045/********************************************************/
1046/* Micro code for 8086:1229 Rev 8 */
1047/********************************************************/
1048
1049/* Parameter values for the D101M B-step */
1050#define D101M_CPUSAVER_TIMER_DWORD 78
1051#define D101M_CPUSAVER_BUNDLE_DWORD 65
1052#define D101M_CPUSAVER_MIN_SIZE_DWORD 126
1053
1054#define D101M_B_RCVBUNDLE_UCODE \
1055{\
10560x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
10570x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
10580x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
10590x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
10600x00380438, 0x00000000, 0x00140000, 0x00380555, \
10610x00308000, 0x00100662, 0x00100561, 0x000E0408, \
10620x00134861, 0x000C0002, 0x00103093, 0x00308000, \
10630x00100624, 0x00100561, 0x000E0408, 0x00100861, \
10640x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
10650x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
10660x00000000, 0x00000000, 0x00000000, 0x00000000, \
10670x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
10680x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
10690x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
10700x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
10710x00041000, 0x00010004, 0x00130826, 0x000C0006, \
10720x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
10730x00000000, 0x00000000, 0x00000000, 0x00000000, \
10740x00000000, 0x00000000, 0x00000000, 0x00000000, \
10750x00080600, 0x00101B10, 0x00050004, 0x00100826, \
10760x00101210, 0x00380C34, 0x00000000, 0x00000000, \
10770x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
10780x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
10790x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
10800x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
10810x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
10820x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
10830x00130826, 0x000C0001, 0x00220559, 0x00101313, \
10840x00380559, 0x00000000, 0x00000000, 0x00000000, \
10850x00000000, 0x00000000, 0x00000000, 0x00000000, \
10860x00000000, 0x00130831, 0x0010090B, 0x00124813, \
10870x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
10880x003806A8, 0x00000000, 0x00000000, 0x00000000, \
1089}
1090
1091/********************************************************/
1092/* Micro code for 8086:1229 Rev 9 */
1093/********************************************************/
1094
1095/* Parameter values for the D101S */
1096#define D101S_CPUSAVER_TIMER_DWORD 78
1097#define D101S_CPUSAVER_BUNDLE_DWORD 67
1098#define D101S_CPUSAVER_MIN_SIZE_DWORD 128
1099
1100#define D101S_RCVBUNDLE_UCODE \
1101{\
11020x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
11030x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
11040x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
11050x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
11060x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
11070x00308000, 0x00100610, 0x00100561, 0x000E0408, \
11080x00134861, 0x000C0002, 0x00103093, 0x00308000, \
11090x00100624, 0x00100561, 0x000E0408, 0x00100861, \
11100x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
11110x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
11120x00000000, 0x00000000, 0x00000000, 0x00000000, \
11130x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
11140x003A047E, 0x00044010, 0x00380819, 0x00000000, \
11150x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
11160x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
11170x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
11180x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
11190x00101313, 0x00380700, 0x00000000, 0x00000000, \
11200x00000000, 0x00000000, 0x00000000, 0x00000000, \
11210x00080600, 0x00101B10, 0x00050004, 0x00100826, \
11220x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
11230x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
11240x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
11250x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
11260x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
11270x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
11280x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
11290x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
11300x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
11310x00000000, 0x00000000, 0x00000000, 0x00000000, \
11320x00000000, 0x00000000, 0x00000000, 0x00130831, \
11330x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
11340x00041000, 0x00010004, 0x00380700 \
1135}
1136
1137/********************************************************/
1138/* Micro code for the 8086:1229 Rev F/10 */
1139/********************************************************/
1140
1141/* Parameter values for the D102 E-step */
1142#define D102_E_CPUSAVER_TIMER_DWORD 42
1143#define D102_E_CPUSAVER_BUNDLE_DWORD 54
1144#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
1145
1146#define D102_E_RCVBUNDLE_UCODE \
1147{\
11480x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
11490x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
11500x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
11510x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
11520x00000000, 0x00000000, 0x00000000, 0x00000000, \
11530x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
11540x00000000, 0x00000000, 0x00000000, 0x00000000, \
11550x00000000, 0x00000000, 0x00000000, 0x00000000, \
11560x00000000, 0x00000000, 0x00000000, 0x00000000, \
11570x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
11580x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
11590x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
11600x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
11610x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
11620x00000000, 0x00000000, 0x00000000, 0x00000000, \
11630x00000000, 0x00000000, 0x00000000, 0x00000000, \
11640x00000000, 0x00000000, 0x00000000, 0x00000000, \
11650x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
11660x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
11670x00000000, 0x00000000, 0x00000000, 0x00000000, \
11680x00000000, 0x00000000, 0x00000000, 0x00000000, \
11690x00000000, 0x00000000, 0x00000000, 0x00000000, \
11700x00000000, 0x00000000, 0x00000000, 0x00000000, \
11710x00000000, 0x00000000, 0x00000000, 0x00000000, \
11720x00000000, 0x00000000, 0x00000000, 0x00000000, \
11730x00000000, 0x00000000, 0x00000000, 0x00000000, \
11740x00000000, 0x00000000, 0x00000000, 0x00000000, \
11750x00000000, 0x00000000, 0x00000000, 0x00000000, \
11760x00000000, 0x00000000, 0x00000000, 0x00000000, \
11770x00000000, 0x00000000, 0x00000000, 0x00000000, \
11780x00000000, 0x00000000, 0x00000000, 0x00000000, \
11790x00000000, 0x00000000, 0x00000000, 0x00000000, \
11800x00000000, 0x00000000, 0x00000000, 0x00000000, \
1181}
1182
/* Prepare a cb that loads the CPUSaver receive-bundling microcode into
 * the controller.  Selects the ucode image matching the detected MAC
 * revision, patches the user-tunable 16-bit literals (INTDELAY,
 * BUNDLEMAX, BUNDLESMALL) into the image, and copies it into the cb.
 * ICH devices and unknown revisions get a NOP cb instead.  @skb unused. */
static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	static struct {
		u32 ucode[UCODE_SIZE + 1];
		u8 mac;			/* mac_* revision this image targets */
		u8 timer_dword;		/* dword index of INTDELAY literal */
		u8 bundle_dword;	/* dword index of BUNDLEMAX literal */
		u8 min_size_dword;	/* dword index of BUNDLESMALL mask */
	} ucode_opts[] = {
		{ D101M_B_RCVBUNDLE_UCODE,
		  mac_82559_D101M,
		  D101M_CPUSAVER_TIMER_DWORD,
		  D101M_CPUSAVER_BUNDLE_DWORD,
		  D101M_CPUSAVER_MIN_SIZE_DWORD },
		{ D101S_RCVBUNDLE_UCODE,
		  mac_82559_D101S,
		  D101S_CPUSAVER_TIMER_DWORD,
		  D101S_CPUSAVER_BUNDLE_DWORD,
		  D101S_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_F,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ D102_E_RCVBUNDLE_UCODE,
		  mac_82551_10,
		  D102_E_CPUSAVER_TIMER_DWORD,
		  D102_E_CPUSAVER_BUNDLE_DWORD,
		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
		{ {0}, 0, 0, 0, 0}	/* sentinel terminates the search */
	}, *opts;
/* *INDENT-ON* */

/*************************************************************************
* CPUSaver parameters
*
* All CPUSaver parameters are 16-bit literals that are part of a
* "move immediate value" instruction.  By changing the value of
* the literal in the instruction before the code is loaded, the
* driver can change the algorithm.
*
* INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
* BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to xFFFF.
*
* BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		goto noloaducode;

	/* Search for ucode match against h/w revision */
	for (opts = ucode_opts; opts->mac; opts++) {
		int i;
		u32 *ucode = opts->ucode;
		if (nic->mac != opts->mac)
			continue;

		/* Insert user-tunable settings into the image's
		 * "move immediate" instruction literals */
		ucode[opts->timer_dword] &= 0xFFFF0000;
		ucode[opts->timer_dword] |= INTDELAY;
		ucode[opts->bundle_dword] &= 0xFFFF0000;
		ucode[opts->bundle_dword] |= BUNDLEMAX;
		ucode[opts->min_size_dword] &= 0xFFFF0000;
		ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;

		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
	/* No ucode for this device: issue a NOP so the cb chain still runs */
	cb->command = cpu_to_le16(cb_nop | cb_el);
}
1304
1305static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
1306 void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
1307{
1308 int err = 0, counter = 50;
1309 struct cb *cb = nic->cb_to_clean;
1310
1311 if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
1312 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
05479938 1313
24180333
JB
1314 /* must restart cuc */
1315 nic->cuc_cmd = cuc_start;
1316
1317 /* wait for completion */
1318 e100_write_flush(nic);
1319 udelay(10);
1320
1321 /* wait for possibly (ouch) 500ms */
1322 while (!(cb->status & cpu_to_le16(cb_complete))) {
1323 msleep(10);
1324 if (!--counter) break;
1325 }
05479938 1326
24180333 1327 /* ack any interupts, something could have been set */
27345bb6 1328 iowrite8(~0, &nic->csr->scb.stat_ack);
24180333
JB
1329
1330 /* if the command failed, or is not OK, notify and return */
1331 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1332 DPRINTK(PROBE,ERR, "ucode load failed\n");
1333 err = -EPERM;
1334 }
05479938 1335
24180333 1336 return err;
1da177e4
LT
1337}
1338
1339static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1340 struct sk_buff *skb)
1341{
1342 cb->command = cpu_to_le16(cb_iaaddr);
1343 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1344}
1345
1346static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1347{
1348 cb->command = cpu_to_le16(cb_dump);
1349 cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
1350 offsetof(struct mem, dump_buf));
1351}
1352
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
/* Probe the MII bus for the PHY, isolate all other PHYs, record the PHY
 * ID, and apply PHY-specific quirks (National tx congestion control,
 * MDI/MDI-X auto-switch).  Returns 0 on success or -EAGAIN if no PHY
 * responds at any of the 32 addresses. */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for(addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		/* BMSR read twice — presumably because the register latches
		 * status and the second read reflects current state; a PHY
		 * is present if BMCR/BMSR aren't all-ones or all-zeros */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if(!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id);
	if(addr == 32)
		return -EAGAIN;

	/* Selected the phy and isolate the rest */
	for(addr = 0; addr < 32; addr++) {
		if(addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
1414
/* Full hardware bring-up: reset, self-test, PHY init, then issue the
 * required sequence of CU/RU commands (base loads, ucode, configure,
 * station address, stats setup).  The command order matters to the
 * hardware.  Returns 0 on success or the first failing step's errno. */
static int e100_hw_init(struct nic *nic)
{
	int err;

	e100_hw_reset(nic);

	DPRINTK(HW, ERR, "e100_hw_init\n");
	/* self-test sleeps, so skip it when called from atomic context */
	if(!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if((err = e100_phy_init(nic)))
		return err;
	if((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	/* ucode load must complete before further cb commands are issued */
	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* tell the device where to DMA its statistics dump */
	if((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}
1447
1448static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1449{
1450 struct net_device *netdev = nic->netdev;
1451 struct dev_mc_list *list = netdev->mc_list;
1452 u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
1453
1454 cb->command = cpu_to_le16(cb_multi);
1455 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1456 for(i = 0; list && i < count; i++, list = list->next)
1457 memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
1458 ETH_ALEN);
1459}
1460
1461static void e100_set_multicast_list(struct net_device *netdev)
1462{
1463 struct nic *nic = netdev_priv(netdev);
1464
1465 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
1466 netdev->mc_count, netdev->flags);
1467
1468 if(netdev->flags & IFF_PROMISC)
1469 nic->flags |= promiscuous;
1470 else
1471 nic->flags &= ~promiscuous;
1472
1473 if(netdev->flags & IFF_ALLMULTI ||
1474 netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
1475 nic->flags |= multicast_all;
1476 else
1477 nic->flags &= ~multicast_all;
1478
1479 e100_exec_cb(nic, NULL, e100_configure);
1480 e100_exec_cb(nic, NULL, e100_multi);
1481}
1482
/* Harvest the statistics the device DMA'd into nic->mem->stats and fold
 * them into the netdev stats, then kick off the next dump+reset cycle.
 * The location of the "dump complete" marker depends on the MAC
 * revision because newer revisions append extra counters to the block. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	/* 'complete' points at the revision-specific end-of-dump word */
	u32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (u32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if(*complete == le32_to_cpu(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		ns->rx_length_errors += le32_to_cpu(s->rx_short_frame_errors) +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* flow-control and TCO counters exist only on newer MACs */
		if(nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if(nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	/* start the next dump; results are picked up on a later call */
	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
1541
1542static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1543{
1544 /* Adjust inter-frame-spacing (IFS) between two transmits if
1545 * we're getting collisions on a half-duplex connection. */
1546
1547 if(duplex == DUPLEX_HALF) {
1548 u32 prev = nic->adaptive_ifs;
1549 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1550
1551 if((nic->tx_frames / 32 < nic->tx_collisions) &&
1552 (nic->tx_frames > min_frames)) {
1553 if(nic->adaptive_ifs < 60)
1554 nic->adaptive_ifs += 5;
1555 } else if (nic->tx_frames < min_frames) {
1556 if(nic->adaptive_ifs >= 5)
1557 nic->adaptive_ifs -= 5;
1558 }
1559 if(nic->adaptive_ifs != prev)
1560 e100_exec_cb(nic, NULL, e100_configure);
1561 }
1562}
1563
/* Periodic timer callback: report link transitions, generate a software
 * interrupt to recover from Rx allocation failures, refresh statistics,
 * adjust adaptive IFS, and apply per-revision workarounds.  Re-arms
 * itself with E100_WATCHDOG_PERIOD. */
static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd;

	DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);

	/* log only on link-state transitions */
	if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n",
			cmd.speed == SPEED_100 ? "100" : "10",
			cmd.duplex == DUPLEX_FULL ? "full" : "half");
	} else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		DPRINTK(LINK, INFO, "link down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);

	if(nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if(nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	/* re-arm; round_jiffies batches wakeups to save power */
	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
1611
/* Prepare a Transmit cb for @skb: fill in the TCB fields and DMA-map
 * the packet data.  Called as a cb-prepare routine from e100_exec_cb()
 * on the hot Tx path. */
static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;
	/* interrupt every 16 packets regardless of delay */
	if((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* NOTE(review): pci_map_single() failure is not checked here; a
	 * failed mapping would hand the device a bad bus address — the
	 * void return type leaves no error path.  TODO: confirm upstream
	 * handling before changing. */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
1628
1629static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1630{
1631 struct nic *nic = netdev_priv(netdev);
1632 int err;
1633
1634 if(nic->flags & ich_10h_workaround) {
1635 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
1636 Issue a NOP command followed by a 1us delay before
1637 issuing the Tx command. */
1f53367d
MC
1638 if(e100_exec_cmd(nic, cuc_nop, 0))
1639 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
1da177e4
LT
1640 udelay(1);
1641 }
1642
1643 err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1644
1645 switch(err) {
1646 case -ENOSPC:
1647 /* We queued the skb, but now we're out of space. */
1648 DPRINTK(TX_ERR, DEBUG, "No space for CB\n");
1649 netif_stop_queue(netdev);
1650 break;
1651 case -ENOMEM:
1652 /* This is a hard error - log it. */
1653 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n");
1654 netif_stop_queue(netdev);
1655 return 1;
1656 }
1657
1658 netdev->trans_start = jiffies;
1659 return 0;
1660}
1661
/* Reclaim completed Tx cbs: walk the ring from cb_to_clean, unmap and
 * free each transmitted skb, count stats, and return the freed cbs to
 * the available pool.  Wakes the Tx queue if it was stopped for lack of
 * cbs.  Returns nonzero if anything was cleaned.  Runs under cb_lock. */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for(cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n",
			(int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			cb->status);

		/* skb is NULL for non-Tx cbs (config, multi, ...) */
		if(likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			pci_unmap_single(nic->pdev,
				le32_to_cpu(cb->u.tcb.tbd.buf_addr),
				le16_to_cpu(cb->u.tcb.tbd.size),
				PCI_DMA_TODEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if(unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
1702
/* Tear down the cb ring: walk it reclaiming every outstanding cb
 * (unmapping and freeing any attached Tx skb), then free the DMA-coherent
 * ring memory and reset the ring bookkeeping pointers. */
static void e100_clean_cbs(struct nic *nic)
{
	if(nic->cbs) {
		/* reclaim until every cb in the ring is accounted for */
		while(nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if(cb->skb) {
				pci_unmap_single(nic->pdev,
					le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					le16_to_cpu(cb->u.tcb.tbd.size),
					PCI_DMA_TODEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		pci_free_consistent(nic->pdev,
			sizeof(struct cb) * nic->params.cbs.count,
			nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	/* next command after a fresh ring must (re)start the CU */
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
1728
1729static int e100_alloc_cbs(struct nic *nic)
1730{
1731 struct cb *cb;
1732 unsigned int i, count = nic->params.cbs.count;
1733
1734 nic->cuc_cmd = cuc_start;
1735 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1736 nic->cbs_avail = 0;
1737
1738 nic->cbs = pci_alloc_consistent(nic->pdev,
1739 sizeof(struct cb) * count, &nic->cbs_dma_addr);
1740 if(!nic->cbs)
1741 return -ENOMEM;
1742
1743 for(cb = nic->cbs, i = 0; i < count; cb++, i++) {
1744 cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1745 cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1746
1747 cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1748 cb->link = cpu_to_le32(nic->cbs_dma_addr +
1749 ((i+1) % count) * sizeof(struct cb));
1750 cb->skb = NULL;
1751 }
1752
1753 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1754 nic->cbs_avail = count;
1755
1756 return 0;
1757}
1758
ca93ca42 1759static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1da177e4 1760{
ca93ca42
JG
1761 if(!nic->rxs) return;
1762 if(RU_SUSPENDED != nic->ru_running) return;
1763
1764 /* handle init time starts */
1765 if(!rx) rx = nic->rxs;
1766
1767 /* (Re)start RU if suspended or idle and RFA is non-NULL */
1768 if(rx->skb) {
1769 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1770 nic->ru_running = RU_RUNNING;
1771 }
1da177e4
LT
1772}
1773
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
/* Allocate and DMA-map a fresh receive skb for @rx, seed it with the
 * blank RFD template, and splice it onto the end of the RFA by pointing
 * the previous RFD's link at it and clearing that RFD's EL bit.
 * Returns 0 or -ENOMEM (allocation or mapping failure). */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
		return -ENOMEM;

	/* Align, init, and map the RFD. */
	skb_reserve(rx->skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

	if(pci_dma_mapping_error(rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one, and clearing EL bit of previous. */
	if(rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned(cpu_to_le32(rx->dma_addr),
			(u32 *)&prev_rfd->link);
		/* ensure the link is visible before the EL bit clears,
		 * so the device never follows a stale link */
		wmb();
		prev_rfd->command &= ~cpu_to_le16(cb_el);
		pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
			sizeof(struct rfd), PCI_DMA_TODEVICE);
	}

	return 0;
}
1807
/* Examine one RFD: if the device has completed it, unmap the buffer and
 * hand the frame up the stack (or drop it on error/oversize), counting
 * stats and NAPI work.  Returns 0 when a frame was consumed, -ENODATA
 * when the RFD is not ready, or -EAGAIN when the NAPI budget is spent. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
	unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;

	if(unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
		sizeof(struct rfd), PCI_DMA_FROMDEVICE);
	rfd_status = le16_to_cpu(rfd->status);

	DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);

	/* If data isn't ready, nothing to indicate */
	if(unlikely(!(rfd_status & cb_complete)))
		return -ENODATA;

	/* Get actual data size; clamp to the buffer we actually mapped */
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if(unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	pci_unmap_single(nic->pdev, rx->dma_addr,
		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

	/* this allows for a fast restart without re-enabling interrupts */
	if(le16_to_cpu(rfd->command) & cb_el)
		nic->ru_running = RU_SUSPENDED;

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	if(unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += actual_size;
		nic->netdev->last_rx = jiffies;
		netif_receive_skb(skb);
		if(work_done)
			(*work_done)++;
	}

	/* ownership of the skb has passed (up the stack or freed) */
	rx->skb = NULL;

	return 0;
}
1868
858119e1 1869static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
1da177e4
LT
1870 unsigned int work_to_do)
1871{
1872 struct rx *rx;
ca93ca42
JG
1873 int restart_required = 0;
1874 struct rx *rx_to_start = NULL;
1875
1876 /* are we already rnr? then pay attention!!! this ensures that
1877 * the state machine progression never allows a start with a
1878 * partially cleaned list, avoiding a race between hardware
1879 * and rx_to_clean when in NAPI mode */
1880 if(RU_SUSPENDED == nic->ru_running)
1881 restart_required = 1;
1da177e4
LT
1882
1883 /* Indicate newly arrived packets */
1884 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
ca93ca42
JG
1885 int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
1886 if(-EAGAIN == err) {
1887 /* hit quota so have more work to do, restart once
1888 * cleanup is complete */
1889 restart_required = 0;
1890 break;
1891 } else if(-ENODATA == err)
1da177e4
LT
1892 break; /* No more to clean */
1893 }
1894
ca93ca42
JG
1895 /* save our starting point as the place we'll restart the receiver */
1896 if(restart_required)
1897 rx_to_start = nic->rx_to_clean;
1898
1da177e4
LT
1899 /* Alloc new skbs to refill list */
1900 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
1901 if(unlikely(e100_rx_alloc_skb(nic, rx)))
1902 break; /* Better luck next time (see watchdog) */
1903 }
ca93ca42
JG
1904
1905 if(restart_required) {
1906 // ack the rnr?
1907 writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
1908 e100_start_receiver(nic, rx_to_start);
1909 if(work_done)
1910 (*work_done)++;
1911 }
1da177e4
LT
1912}
1913
1914static void e100_rx_clean_list(struct nic *nic)
1915{
1916 struct rx *rx;
1917 unsigned int i, count = nic->params.rfds.count;
1918
ca93ca42
JG
1919 nic->ru_running = RU_UNINITIALIZED;
1920
1da177e4
LT
1921 if(nic->rxs) {
1922 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1923 if(rx->skb) {
1924 pci_unmap_single(nic->pdev, rx->dma_addr,
1925 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
1926 dev_kfree_skb(rx->skb);
1927 }
1928 }
1929 kfree(nic->rxs);
1930 nic->rxs = NULL;
1931 }
1932
1933 nic->rx_to_use = nic->rx_to_clean = NULL;
1da177e4
LT
1934}
1935
1936static int e100_rx_alloc_list(struct nic *nic)
1937{
1938 struct rx *rx;
1939 unsigned int i, count = nic->params.rfds.count;
1940
1941 nic->rx_to_use = nic->rx_to_clean = NULL;
ca93ca42 1942 nic->ru_running = RU_UNINITIALIZED;
1da177e4 1943
c48e3fca 1944 if(!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
1da177e4 1945 return -ENOMEM;
1da177e4
LT
1946
1947 for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
1948 rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
1949 rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
1950 if(e100_rx_alloc_skb(nic, rx)) {
1951 e100_rx_clean_list(nic);
1952 return -ENOMEM;
1953 }
1954 }
1955
1956 nic->rx_to_use = nic->rx_to_clean = nic->rxs;
ca93ca42 1957 nic->ru_running = RU_SUSPENDED;
1da177e4
LT
1958
1959 return 0;
1960}
1961
7d12e780 1962static irqreturn_t e100_intr(int irq, void *dev_id)
1da177e4
LT
1963{
1964 struct net_device *netdev = dev_id;
1965 struct nic *nic = netdev_priv(netdev);
27345bb6 1966 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
1da177e4
LT
1967
1968 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack);
1969
1970 if(stat_ack == stat_ack_not_ours || /* Not our interrupt */
1971 stat_ack == stat_ack_not_present) /* Hardware is ejected */
1972 return IRQ_NONE;
1973
1974 /* Ack interrupt(s) */
27345bb6 1975 iowrite8(stat_ack, &nic->csr->scb.stat_ack);
1da177e4 1976
ca93ca42
JG
1977 /* We hit Receive No Resource (RNR); restart RU after cleaning */
1978 if(stat_ack & stat_ack_rnr)
1979 nic->ru_running = RU_SUSPENDED;
1980
bea3348e 1981 if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) {
0685c31b 1982 e100_disable_irq(nic);
bea3348e 1983 __netif_rx_schedule(netdev, &nic->napi);
0685c31b 1984 }
1da177e4
LT
1985
1986 return IRQ_HANDLED;
1987}
1988
bea3348e 1989static int e100_poll(struct napi_struct *napi, int budget)
1da177e4 1990{
bea3348e
SH
1991 struct nic *nic = container_of(napi, struct nic, napi);
1992 struct net_device *netdev = nic->netdev;
ddfce6bb 1993 unsigned int work_done = 0;
1da177e4
LT
1994 int tx_cleaned;
1995
bea3348e 1996 e100_rx_clean(nic, &work_done, budget);
1da177e4
LT
1997 tx_cleaned = e100_tx_clean(nic);
1998
1999 /* If no Rx and Tx cleanup work was done, exit polling mode. */
2000 if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
bea3348e 2001 netif_rx_complete(netdev, napi);
1da177e4 2002 e100_enable_irq(nic);
1da177e4
LT
2003 }
2004
bea3348e 2005 return work_done;
1da177e4
LT
2006}
2007
2008#ifdef CONFIG_NET_POLL_CONTROLLER
2009static void e100_netpoll(struct net_device *netdev)
2010{
2011 struct nic *nic = netdev_priv(netdev);
611494dc 2012
1da177e4 2013 e100_disable_irq(nic);
7d12e780 2014 e100_intr(nic->pdev->irq, netdev);
1da177e4
LT
2015 e100_tx_clean(nic);
2016 e100_enable_irq(nic);
2017}
2018#endif
2019
1da177e4
LT
2020static int e100_set_mac_address(struct net_device *netdev, void *p)
2021{
2022 struct nic *nic = netdev_priv(netdev);
2023 struct sockaddr *addr = p;
2024
2025 if (!is_valid_ether_addr(addr->sa_data))
2026 return -EADDRNOTAVAIL;
2027
2028 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2029 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2030
2031 return 0;
2032}
2033
2034static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2035{
2036 if(new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2037 return -EINVAL;
2038 netdev->mtu = new_mtu;
2039 return 0;
2040}
2041
2042static int e100_asf(struct nic *nic)
2043{
2044 /* ASF can be enabled from eeprom */
2045 return((nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2046 (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2047 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2048 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
2049}
2050
2051static int e100_up(struct nic *nic)
2052{
2053 int err;
2054
2055 if((err = e100_rx_alloc_list(nic)))
2056 return err;
2057 if((err = e100_alloc_cbs(nic)))
2058 goto err_rx_clean_list;
2059 if((err = e100_hw_init(nic)))
2060 goto err_clean_cbs;
2061 e100_set_multicast_list(nic->netdev);
ca93ca42 2062 e100_start_receiver(nic, NULL);
1da177e4 2063 mod_timer(&nic->watchdog, jiffies);
1fb9df5d 2064 if((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
1da177e4
LT
2065 nic->netdev->name, nic->netdev)))
2066 goto err_no_irq;
1da177e4 2067 netif_wake_queue(nic->netdev);
bea3348e 2068 napi_enable(&nic->napi);
0236ebb7
MC
2069 /* enable ints _after_ enabling poll, preventing a race between
2070 * disable ints+schedule */
2071 e100_enable_irq(nic);
1da177e4
LT
2072 return 0;
2073
2074err_no_irq:
2075 del_timer_sync(&nic->watchdog);
2076err_clean_cbs:
2077 e100_clean_cbs(nic);
2078err_rx_clean_list:
2079 e100_rx_clean_list(nic);
2080 return err;
2081}
2082
2083static void e100_down(struct nic *nic)
2084{
0236ebb7 2085 /* wait here for poll to complete */
bea3348e 2086 napi_disable(&nic->napi);
0236ebb7 2087 netif_stop_queue(nic->netdev);
1da177e4
LT
2088 e100_hw_reset(nic);
2089 free_irq(nic->pdev->irq, nic->netdev);
2090 del_timer_sync(&nic->watchdog);
2091 netif_carrier_off(nic->netdev);
1da177e4
LT
2092 e100_clean_cbs(nic);
2093 e100_rx_clean_list(nic);
2094}
2095
2096static void e100_tx_timeout(struct net_device *netdev)
2097{
2098 struct nic *nic = netdev_priv(netdev);
2099
05479938 2100 /* Reset outside of interrupt context, to avoid request_irq
2acdb1e0
MC
2101 * in interrupt context */
2102 schedule_work(&nic->tx_timeout_task);
2103}
2104
c4028958 2105static void e100_tx_timeout_task(struct work_struct *work)
2acdb1e0 2106{
c4028958
DH
2107 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2108 struct net_device *netdev = nic->netdev;
2acdb1e0 2109
1da177e4 2110 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
27345bb6 2111 ioread8(&nic->csr->scb.status));
1da177e4
LT
2112 e100_down(netdev_priv(netdev));
2113 e100_up(netdev_priv(netdev));
2114}
2115
2116static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2117{
2118 int err;
2119 struct sk_buff *skb;
2120
2121 /* Use driver resources to perform internal MAC or PHY
2122 * loopback test. A single packet is prepared and transmitted
2123 * in loopback mode, and the test passes if the received
2124 * packet compares byte-for-byte to the transmitted packet. */
2125
2126 if((err = e100_rx_alloc_list(nic)))
2127 return err;
2128 if((err = e100_alloc_cbs(nic)))
2129 goto err_clean_rx;
2130
2131 /* ICH PHY loopback is broken so do MAC loopback instead */
2132 if(nic->flags & ich && loopback_mode == lb_phy)
2133 loopback_mode = lb_mac;
2134
2135 nic->loopback = loopback_mode;
2136 if((err = e100_hw_init(nic)))
2137 goto err_loopback_none;
2138
2139 if(loopback_mode == lb_phy)
2140 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2141 BMCR_LOOPBACK);
2142
ca93ca42 2143 e100_start_receiver(nic, NULL);
1da177e4 2144
4187592b 2145 if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
1da177e4
LT
2146 err = -ENOMEM;
2147 goto err_loopback_none;
2148 }
2149 skb_put(skb, ETH_DATA_LEN);
2150 memset(skb->data, 0xFF, ETH_DATA_LEN);
2151 e100_xmit_frame(skb, nic->netdev);
2152
2153 msleep(10);
2154
aa49cdd9
JB
2155 pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2156 RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
2157
1da177e4
LT
2158 if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2159 skb->data, ETH_DATA_LEN))
2160 err = -EAGAIN;
2161
2162err_loopback_none:
2163 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2164 nic->loopback = lb_none;
1da177e4 2165 e100_clean_cbs(nic);
aa49cdd9 2166 e100_hw_reset(nic);
1da177e4
LT
2167err_clean_rx:
2168 e100_rx_clean_list(nic);
2169 return err;
2170}
2171
2172#define MII_LED_CONTROL 0x1B
2173static void e100_blink_led(unsigned long data)
2174{
2175 struct nic *nic = (struct nic *)data;
2176 enum led_state {
2177 led_on = 0x01,
2178 led_off = 0x04,
2179 led_on_559 = 0x05,
2180 led_on_557 = 0x07,
2181 };
2182
2183 nic->leds = (nic->leds & led_on) ? led_off :
2184 (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2185 mdio_write(nic->netdev, nic->mii.phy_id, MII_LED_CONTROL, nic->leds);
2186 mod_timer(&nic->blink_timer, jiffies + HZ / 4);
2187}
2188
2189static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2190{
2191 struct nic *nic = netdev_priv(netdev);
2192 return mii_ethtool_gset(&nic->mii, cmd);
2193}
2194
2195static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2196{
2197 struct nic *nic = netdev_priv(netdev);
2198 int err;
2199
2200 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2201 err = mii_ethtool_sset(&nic->mii, cmd);
2202 e100_exec_cb(nic, NULL, e100_configure);
2203
2204 return err;
2205}
2206
2207static void e100_get_drvinfo(struct net_device *netdev,
2208 struct ethtool_drvinfo *info)
2209{
2210 struct nic *nic = netdev_priv(netdev);
2211 strcpy(info->driver, DRV_NAME);
2212 strcpy(info->version, DRV_VERSION);
2213 strcpy(info->fw_version, "N/A");
2214 strcpy(info->bus_info, pci_name(nic->pdev));
2215}
2216
2217static int e100_get_regs_len(struct net_device *netdev)
2218{
2219 struct nic *nic = netdev_priv(netdev);
2220#define E100_PHY_REGS 0x1C
2221#define E100_REGS_LEN 1 + E100_PHY_REGS + \
2222 sizeof(nic->mem->dump_buf) / sizeof(u32)
2223 return E100_REGS_LEN * sizeof(u32);
2224}
2225
2226static void e100_get_regs(struct net_device *netdev,
2227 struct ethtool_regs *regs, void *p)
2228{
2229 struct nic *nic = netdev_priv(netdev);
2230 u32 *buff = p;
2231 int i;
2232
44c10138 2233 regs->version = (1 << 24) | nic->pdev->revision;
27345bb6
JB
2234 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2235 ioread8(&nic->csr->scb.cmd_lo) << 16 |
2236 ioread16(&nic->csr->scb.status);
1da177e4
LT
2237 for(i = E100_PHY_REGS; i >= 0; i--)
2238 buff[1 + E100_PHY_REGS - i] =
2239 mdio_read(netdev, nic->mii.phy_id, i);
2240 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2241 e100_exec_cb(nic, NULL, e100_dump);
2242 msleep(10);
2243 memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2244 sizeof(nic->mem->dump_buf));
2245}
2246
2247static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2248{
2249 struct nic *nic = netdev_priv(netdev);
2250 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2251 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2252}
2253
2254static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2255{
2256 struct nic *nic = netdev_priv(netdev);
2257
2258 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
2259 return -EOPNOTSUPP;
2260
2261 if(wol->wolopts)
2262 nic->flags |= wol_magic;
2263 else
2264 nic->flags &= ~wol_magic;
2265
1da177e4
LT
2266 e100_exec_cb(nic, NULL, e100_configure);
2267
2268 return 0;
2269}
2270
2271static u32 e100_get_msglevel(struct net_device *netdev)
2272{
2273 struct nic *nic = netdev_priv(netdev);
2274 return nic->msg_enable;
2275}
2276
2277static void e100_set_msglevel(struct net_device *netdev, u32 value)
2278{
2279 struct nic *nic = netdev_priv(netdev);
2280 nic->msg_enable = value;
2281}
2282
2283static int e100_nway_reset(struct net_device *netdev)
2284{
2285 struct nic *nic = netdev_priv(netdev);
2286 return mii_nway_restart(&nic->mii);
2287}
2288
2289static u32 e100_get_link(struct net_device *netdev)
2290{
2291 struct nic *nic = netdev_priv(netdev);
2292 return mii_link_ok(&nic->mii);
2293}
2294
2295static int e100_get_eeprom_len(struct net_device *netdev)
2296{
2297 struct nic *nic = netdev_priv(netdev);
2298 return nic->eeprom_wc << 1;
2299}
2300
2301#define E100_EEPROM_MAGIC 0x1234
2302static int e100_get_eeprom(struct net_device *netdev,
2303 struct ethtool_eeprom *eeprom, u8 *bytes)
2304{
2305 struct nic *nic = netdev_priv(netdev);
2306
2307 eeprom->magic = E100_EEPROM_MAGIC;
2308 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2309
2310 return 0;
2311}
2312
2313static int e100_set_eeprom(struct net_device *netdev,
2314 struct ethtool_eeprom *eeprom, u8 *bytes)
2315{
2316 struct nic *nic = netdev_priv(netdev);
2317
2318 if(eeprom->magic != E100_EEPROM_MAGIC)
2319 return -EINVAL;
2320
2321 memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2322
2323 return e100_eeprom_save(nic, eeprom->offset >> 1,
2324 (eeprom->len >> 1) + 1);
2325}
2326
2327static void e100_get_ringparam(struct net_device *netdev,
2328 struct ethtool_ringparam *ring)
2329{
2330 struct nic *nic = netdev_priv(netdev);
2331 struct param_range *rfds = &nic->params.rfds;
2332 struct param_range *cbs = &nic->params.cbs;
2333
2334 ring->rx_max_pending = rfds->max;
2335 ring->tx_max_pending = cbs->max;
2336 ring->rx_mini_max_pending = 0;
2337 ring->rx_jumbo_max_pending = 0;
2338 ring->rx_pending = rfds->count;
2339 ring->tx_pending = cbs->count;
2340 ring->rx_mini_pending = 0;
2341 ring->rx_jumbo_pending = 0;
2342}
2343
2344static int e100_set_ringparam(struct net_device *netdev,
2345 struct ethtool_ringparam *ring)
2346{
2347 struct nic *nic = netdev_priv(netdev);
2348 struct param_range *rfds = &nic->params.rfds;
2349 struct param_range *cbs = &nic->params.cbs;
2350
05479938 2351 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
1da177e4
LT
2352 return -EINVAL;
2353
2354 if(netif_running(netdev))
2355 e100_down(nic);
2356 rfds->count = max(ring->rx_pending, rfds->min);
2357 rfds->count = min(rfds->count, rfds->max);
2358 cbs->count = max(ring->tx_pending, cbs->min);
2359 cbs->count = min(cbs->count, cbs->max);
2360 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n",
2361 rfds->count, cbs->count);
2362 if(netif_running(netdev))
2363 e100_up(nic);
2364
2365 return 0;
2366}
2367
2368static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2369 "Link test (on/offline)",
2370 "Eeprom test (on/offline)",
2371 "Self test (offline)",
2372 "Mac loopback (offline)",
2373 "Phy loopback (offline)",
2374};
2375#define E100_TEST_LEN sizeof(e100_gstrings_test) / ETH_GSTRING_LEN
2376
1da177e4
LT
2377static void e100_diag_test(struct net_device *netdev,
2378 struct ethtool_test *test, u64 *data)
2379{
2380 struct ethtool_cmd cmd;
2381 struct nic *nic = netdev_priv(netdev);
2382 int i, err;
2383
2384 memset(data, 0, E100_TEST_LEN * sizeof(u64));
2385 data[0] = !mii_link_ok(&nic->mii);
2386 data[1] = e100_eeprom_load(nic);
2387 if(test->flags & ETH_TEST_FL_OFFLINE) {
2388
2389 /* save speed, duplex & autoneg settings */
2390 err = mii_ethtool_gset(&nic->mii, &cmd);
2391
2392 if(netif_running(netdev))
2393 e100_down(nic);
2394 data[2] = e100_self_test(nic);
2395 data[3] = e100_loopback_test(nic, lb_mac);
2396 data[4] = e100_loopback_test(nic, lb_phy);
2397
2398 /* restore speed, duplex & autoneg settings */
2399 err = mii_ethtool_sset(&nic->mii, &cmd);
2400
2401 if(netif_running(netdev))
2402 e100_up(nic);
2403 }
2404 for(i = 0; i < E100_TEST_LEN; i++)
2405 test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
a074fb86
MC
2406
2407 msleep_interruptible(4 * 1000);
1da177e4
LT
2408}
2409
2410static int e100_phys_id(struct net_device *netdev, u32 data)
2411{
2412 struct nic *nic = netdev_priv(netdev);
2413
2414 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
2415 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
2416 mod_timer(&nic->blink_timer, jiffies);
2417 msleep_interruptible(data * 1000);
2418 del_timer_sync(&nic->blink_timer);
2419 mdio_write(netdev, nic->mii.phy_id, MII_LED_CONTROL, 0);
2420
2421 return 0;
2422}
2423
2424static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2425 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2426 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2427 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2428 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2429 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2430 "tx_heartbeat_errors", "tx_window_errors",
2431 /* device-specific stats */
2432 "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2433 "tx_flow_control_pause", "rx_flow_control_pause",
2434 "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2435};
2436#define E100_NET_STATS_LEN 21
2437#define E100_STATS_LEN sizeof(e100_gstrings_stats) / ETH_GSTRING_LEN
2438
b9f2c044 2439static int e100_get_sset_count(struct net_device *netdev, int sset)
1da177e4 2440{
b9f2c044
JG
2441 switch (sset) {
2442 case ETH_SS_TEST:
2443 return E100_TEST_LEN;
2444 case ETH_SS_STATS:
2445 return E100_STATS_LEN;
2446 default:
2447 return -EOPNOTSUPP;
2448 }
1da177e4
LT
2449}
2450
2451static void e100_get_ethtool_stats(struct net_device *netdev,
2452 struct ethtool_stats *stats, u64 *data)
2453{
2454 struct nic *nic = netdev_priv(netdev);
2455 int i;
2456
2457 for(i = 0; i < E100_NET_STATS_LEN; i++)
09f75cd7 2458 data[i] = ((unsigned long *)&netdev->stats)[i];
1da177e4
LT
2459
2460 data[i++] = nic->tx_deferred;
2461 data[i++] = nic->tx_single_collisions;
2462 data[i++] = nic->tx_multiple_collisions;
2463 data[i++] = nic->tx_fc_pause;
2464 data[i++] = nic->rx_fc_pause;
2465 data[i++] = nic->rx_fc_unsupported;
2466 data[i++] = nic->tx_tco_frames;
2467 data[i++] = nic->rx_tco_frames;
2468}
2469
2470static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2471{
2472 switch(stringset) {
2473 case ETH_SS_TEST:
2474 memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2475 break;
2476 case ETH_SS_STATS:
2477 memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2478 break;
2479 }
2480}
2481
7282d491 2482static const struct ethtool_ops e100_ethtool_ops = {
1da177e4
LT
2483 .get_settings = e100_get_settings,
2484 .set_settings = e100_set_settings,
2485 .get_drvinfo = e100_get_drvinfo,
2486 .get_regs_len = e100_get_regs_len,
2487 .get_regs = e100_get_regs,
2488 .get_wol = e100_get_wol,
2489 .set_wol = e100_set_wol,
2490 .get_msglevel = e100_get_msglevel,
2491 .set_msglevel = e100_set_msglevel,
2492 .nway_reset = e100_nway_reset,
2493 .get_link = e100_get_link,
2494 .get_eeprom_len = e100_get_eeprom_len,
2495 .get_eeprom = e100_get_eeprom,
2496 .set_eeprom = e100_set_eeprom,
2497 .get_ringparam = e100_get_ringparam,
2498 .set_ringparam = e100_set_ringparam,
1da177e4
LT
2499 .self_test = e100_diag_test,
2500 .get_strings = e100_get_strings,
2501 .phys_id = e100_phys_id,
1da177e4 2502 .get_ethtool_stats = e100_get_ethtool_stats,
b9f2c044 2503 .get_sset_count = e100_get_sset_count,
1da177e4
LT
2504};
2505
2506static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2507{
2508 struct nic *nic = netdev_priv(netdev);
2509
2510 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2511}
2512
2513static int e100_alloc(struct nic *nic)
2514{
2515 nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2516 &nic->dma_addr);
2517 return nic->mem ? 0 : -ENOMEM;
2518}
2519
2520static void e100_free(struct nic *nic)
2521{
2522 if(nic->mem) {
2523 pci_free_consistent(nic->pdev, sizeof(struct mem),
2524 nic->mem, nic->dma_addr);
2525 nic->mem = NULL;
2526 }
2527}
2528
2529static int e100_open(struct net_device *netdev)
2530{
2531 struct nic *nic = netdev_priv(netdev);
2532 int err = 0;
2533
2534 netif_carrier_off(netdev);
2535 if((err = e100_up(nic)))
2536 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n");
2537 return err;
2538}
2539
2540static int e100_close(struct net_device *netdev)
2541{
2542 e100_down(netdev_priv(netdev));
2543 return 0;
2544}
2545
2546static int __devinit e100_probe(struct pci_dev *pdev,
2547 const struct pci_device_id *ent)
2548{
2549 struct net_device *netdev;
2550 struct nic *nic;
2551 int err;
0795af57 2552 DECLARE_MAC_BUF(mac);
1da177e4
LT
2553
2554 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2555 if(((1 << debug) - 1) & NETIF_MSG_PROBE)
2556 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n");
2557 return -ENOMEM;
2558 }
2559
2560 netdev->open = e100_open;
2561 netdev->stop = e100_close;
2562 netdev->hard_start_xmit = e100_xmit_frame;
1da177e4
LT
2563 netdev->set_multicast_list = e100_set_multicast_list;
2564 netdev->set_mac_address = e100_set_mac_address;
2565 netdev->change_mtu = e100_change_mtu;
2566 netdev->do_ioctl = e100_do_ioctl;
2567 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2568 netdev->tx_timeout = e100_tx_timeout;
2569 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
1da177e4
LT
2570#ifdef CONFIG_NET_POLL_CONTROLLER
2571 netdev->poll_controller = e100_netpoll;
2572#endif
0eb5a34c 2573 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1da177e4
LT
2574
2575 nic = netdev_priv(netdev);
bea3348e 2576 netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
1da177e4
LT
2577 nic->netdev = netdev;
2578 nic->pdev = pdev;
2579 nic->msg_enable = (1 << debug) - 1;
2580 pci_set_drvdata(pdev, netdev);
2581
2582 if((err = pci_enable_device(pdev))) {
2583 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n");
2584 goto err_out_free_dev;
2585 }
2586
2587 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2588 DPRINTK(PROBE, ERR, "Cannot find proper PCI device "
2589 "base address, aborting.\n");
2590 err = -ENODEV;
2591 goto err_out_disable_pdev;
2592 }
2593
2594 if((err = pci_request_regions(pdev, DRV_NAME))) {
2595 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n");
2596 goto err_out_disable_pdev;
2597 }
2598
1e7f0bd8 2599 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
1da177e4
LT
2600 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n");
2601 goto err_out_free_res;
2602 }
2603
1da177e4
LT
2604 SET_NETDEV_DEV(netdev, &pdev->dev);
2605
27345bb6
JB
2606 if (use_io)
2607 DPRINTK(PROBE, INFO, "using i/o access mode\n");
2608
2609 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
1da177e4
LT
2610 if(!nic->csr) {
2611 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n");
2612 err = -ENOMEM;
2613 goto err_out_free_res;
2614 }
2615
2616 if(ent->driver_data)
2617 nic->flags |= ich;
2618 else
2619 nic->flags &= ~ich;
2620
2621 e100_get_defaults(nic);
2622
1f53367d 2623 /* locks must be initialized before calling hw_reset */
1da177e4
LT
2624 spin_lock_init(&nic->cb_lock);
2625 spin_lock_init(&nic->cmd_lock);
ac7c6669 2626 spin_lock_init(&nic->mdio_lock);
1da177e4
LT
2627
2628 /* Reset the device before pci_set_master() in case device is in some
2629 * funky state and has an interrupt pending - hint: we don't have the
2630 * interrupt handler registered yet. */
2631 e100_hw_reset(nic);
2632
2633 pci_set_master(pdev);
2634
2635 init_timer(&nic->watchdog);
2636 nic->watchdog.function = e100_watchdog;
2637 nic->watchdog.data = (unsigned long)nic;
2638 init_timer(&nic->blink_timer);
2639 nic->blink_timer.function = e100_blink_led;
2640 nic->blink_timer.data = (unsigned long)nic;
2641
c4028958 2642 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2acdb1e0 2643
1da177e4
LT
2644 if((err = e100_alloc(nic))) {
2645 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
2646 goto err_out_iounmap;
2647 }
2648
1da177e4
LT
2649 if((err = e100_eeprom_load(nic)))
2650 goto err_out_free;
2651
f92d8728
MC
2652 e100_phy_init(nic);
2653
1da177e4 2654 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
a92dd923 2655 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
948cd43f
JB
2656 if (!is_valid_ether_addr(netdev->perm_addr)) {
2657 if (!eeprom_bad_csum_allow) {
2658 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2659 "EEPROM, aborting.\n");
2660 err = -EAGAIN;
2661 goto err_out_free;
2662 } else {
2663 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, "
2664 "you MUST configure one.\n");
2665 }
1da177e4
LT
2666 }
2667
2668 /* Wol magic packet can be enabled from eeprom */
2669 if((nic->mac >= mac_82558_D101_A4) &&
2670 (nic->eeprom[eeprom_id] & eeprom_id_wol))
2671 nic->flags |= wol_magic;
2672
6bdacb1a 2673 /* ack any pending wake events, disable PME */
3435dbce
JB
2674 err = pci_enable_wake(pdev, 0, 0);
2675 if (err)
2676 DPRINTK(PROBE, ERR, "Error clearing wake event\n");
1da177e4
LT
2677
2678 strcpy(netdev->name, "eth%d");
2679 if((err = register_netdev(netdev))) {
2680 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2681 goto err_out_free;
2682 }
2683
0795af57
JP
2684 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %s\n",
2685 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2686 pdev->irq, print_mac(mac, netdev->dev_addr));
1da177e4
LT
2687
2688 return 0;
2689
2690err_out_free:
2691 e100_free(nic);
2692err_out_iounmap:
27345bb6 2693 pci_iounmap(pdev, nic->csr);
1da177e4
LT
2694err_out_free_res:
2695 pci_release_regions(pdev);
2696err_out_disable_pdev:
2697 pci_disable_device(pdev);
2698err_out_free_dev:
2699 pci_set_drvdata(pdev, NULL);
2700 free_netdev(netdev);
2701 return err;
2702}
2703
2704static void __devexit e100_remove(struct pci_dev *pdev)
2705{
2706 struct net_device *netdev = pci_get_drvdata(pdev);
2707
2708 if(netdev) {
2709 struct nic *nic = netdev_priv(netdev);
2710 unregister_netdev(netdev);
2711 e100_free(nic);
2712 iounmap(nic->csr);
2713 free_netdev(netdev);
2714 pci_release_regions(pdev);
2715 pci_disable_device(pdev);
2716 pci_set_drvdata(pdev, NULL);
2717 }
2718}
2719
e8e82b76 2720#ifdef CONFIG_PM
1da177e4
LT
2721static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2722{
2723 struct net_device *netdev = pci_get_drvdata(pdev);
2724 struct nic *nic = netdev_priv(netdev);
2725
824545e7 2726 if (netif_running(netdev))
bea3348e 2727 napi_disable(&nic->napi);
e8e82b76
AK
2728 del_timer_sync(&nic->watchdog);
2729 netif_carrier_off(nic->netdev);
518d8338 2730 netif_device_detach(netdev);
a53a33da 2731
1da177e4 2732 pci_save_state(pdev);
e8e82b76
AK
2733
2734 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2735 pci_enable_wake(pdev, PCI_D3hot, 1);
2736 pci_enable_wake(pdev, PCI_D3cold, 1);
2737 } else {
2738 pci_enable_wake(pdev, PCI_D3hot, 0);
2739 pci_enable_wake(pdev, PCI_D3cold, 0);
2740 }
975b366a 2741
1da177e4 2742 pci_disable_device(pdev);
518d8338 2743 free_irq(pdev->irq, netdev);
e8e82b76 2744 pci_set_power_state(pdev, PCI_D3hot);
1da177e4
LT
2745
2746 return 0;
2747}
2748
2749static int e100_resume(struct pci_dev *pdev)
2750{
2751 struct net_device *netdev = pci_get_drvdata(pdev);
2752 struct nic *nic = netdev_priv(netdev);
2753
975b366a 2754 pci_set_power_state(pdev, PCI_D0);
1da177e4 2755 pci_restore_state(pdev);
6bdacb1a 2756 /* ack any pending wake events, disable PME */
975b366a 2757 pci_enable_wake(pdev, 0, 0);
1da177e4
LT
2758
2759 netif_device_attach(netdev);
975b366a 2760 if (netif_running(netdev))
1da177e4
LT
2761 e100_up(nic);
2762
2763 return 0;
2764}
975b366a 2765#endif /* CONFIG_PM */
1da177e4 2766
d18c3db5 2767static void e100_shutdown(struct pci_dev *pdev)
6bdacb1a 2768{
e8e82b76
AK
2769 struct net_device *netdev = pci_get_drvdata(pdev);
2770 struct nic *nic = netdev_priv(netdev);
2771
824545e7 2772 if (netif_running(netdev))
bea3348e 2773 napi_disable(&nic->napi);
e8e82b76
AK
2774 del_timer_sync(&nic->watchdog);
2775 netif_carrier_off(nic->netdev);
2776
2777 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2778 pci_enable_wake(pdev, PCI_D3hot, 1);
2779 pci_enable_wake(pdev, PCI_D3cold, 1);
2780 } else {
2781 pci_enable_wake(pdev, PCI_D3hot, 0);
2782 pci_enable_wake(pdev, PCI_D3cold, 0);
2783 }
2784
2785 pci_disable_device(pdev);
2786 pci_set_power_state(pdev, PCI_D3hot);
6bdacb1a
MC
2787}
2788
2cc30492
AK
2789/* ------------------ PCI Error Recovery infrastructure -------------- */
2790/**
2791 * e100_io_error_detected - called when PCI error is detected.
2792 * @pdev: Pointer to PCI device
2793 * @state: The current pci conneection state
2794 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* Similar to calling e100_down(), but avoids adapter I/O. */
	netdev->stop(netdev);

	/* Detach; put netif into state similar to hotplug unplug.
	 * NOTE(review): napi_enable() here re-arms NAPI right after
	 * ->stop() disabled it — presumably to leave NAPI state balanced
	 * for the e100_io_resume()/e100_open() recovery path.  Confirm
	 * this cannot produce a double napi_enable() when the interface
	 * is reopened.
	 */
	napi_enable(&nic->napi);
	netif_device_detach(netdev);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
2811
2812/**
2813 * e100_io_slot_reset - called after the pci bus has been reset.
2814 * @pdev: Pointer to PCI device
2815 *
2816 * Restart the card from scratch.
2817 */
2818static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
2819{
2820 struct net_device *netdev = pci_get_drvdata(pdev);
2821 struct nic *nic = netdev_priv(netdev);
2822
2823 if (pci_enable_device(pdev)) {
2824 printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n");
2825 return PCI_ERS_RESULT_DISCONNECT;
2826 }
2827 pci_set_master(pdev);
2828
2829 /* Only one device per card can do a reset */
2830 if (0 != PCI_FUNC(pdev->devfn))
2831 return PCI_ERS_RESULT_RECOVERED;
2832 e100_hw_reset(nic);
2833 e100_phy_init(nic);
2834
2835 return PCI_ERS_RESULT_RECOVERED;
2836}
2837
2838/**
2839 * e100_io_resume - resume normal operations
2840 * @pdev: Pointer to PCI device
2841 *
2842 * Resume normal operations after an error recovery
2843 * sequence has been completed.
2844 */
2845static void e100_io_resume(struct pci_dev *pdev)
2846{
2847 struct net_device *netdev = pci_get_drvdata(pdev);
2848 struct nic *nic = netdev_priv(netdev);
2849
2850 /* ack any pending wake events, disable PME */
2851 pci_enable_wake(pdev, 0, 0);
2852
2853 netif_device_attach(netdev);
2854 if (netif_running(netdev)) {
2855 e100_open(netdev);
2856 mod_timer(&nic->watchdog, jiffies);
2857 }
2858}
2859
/* PCI error-recovery callbacks registered with the PCI core via
 * e100_driver.err_handler below.
 */
static struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};
6bdacb1a 2865
1da177e4
LT
/* PCI glue for the driver: device ID table, probe/remove lifecycle,
 * power-management hooks (when CONFIG_PM), shutdown and error recovery.
 */
static struct pci_driver e100_driver = {
	.name = DRV_NAME,
	.id_table = e100_id_table,
	.probe = e100_probe,
	.remove = __devexit_p(e100_remove),
#ifdef CONFIG_PM
	/* Power Management hooks */
	.suspend = e100_suspend,
	.resume = e100_resume,
#endif
	.shutdown = e100_shutdown,
	.err_handler = &e100_err_handler,
};
2879
2880static int __init e100_init_module(void)
2881{
2882 if(((1 << debug) - 1) & NETIF_MSG_DRV) {
2883 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2884 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
2885 }
29917620 2886 return pci_register_driver(&e100_driver);
1da177e4
LT
2887}
2888
static void __exit e100_cleanup_module(void)
{
	/* Unhook from the PCI core; the core calls e100_remove() for
	 * every device still bound to this driver.
	 */
	pci_unregister_driver(&e100_driver);
}

/* Module entry and exit points. */
module_init(e100_init_module);
module_exit(e100_cleanup_module);