1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19
20
21 Version LK1.01a (jgarzik):
22 - Replace some MII-related magic numbers with constants
23
24 Version LK1.02 (D-Link):
25 - Add new board to PCI ID list
26 - Fix multicast bug
27
28 Version LK1.03 (D-Link):
29 - New Rx scheme, reduce Rx congestion
30 - Option to disable flow control
31
32 Version LK1.04 (D-Link):
33 - Tx timeout recovery
34 - More support for ethtool.
35
36 Version LK1.04a:
37 - Remove unused/constant members from struct pci_id_info
38 (which then allows removal of 'drv_flags' from private struct)
39 (jgarzik)
40 - If no phy is found, fail to load that board (jgarzik)
41 - Always start phy id scan at id 1 to avoid problems (Donald Becker)
42	- Autodetect where mii_preamble_required is needed,
43 default to not needed. (Donald Becker)
44
45 Version LK1.04b:
46 - Remove mii_preamble_required module parameter (Donald Becker)
47 - Add per-interface mii_preamble_required (setting is autodetected)
48 (Donald Becker)
49 - Remove unnecessary cast from void pointer (jgarzik)
50 - Re-align comments in private struct (jgarzik)
51
52 Version LK1.04c (jgarzik):
53 - Support bitmapped message levels (NETIF_MSG_xxx), and the
54 two ethtool ioctls that get/set them
55 - Don't hand-code MII ethtool support, use standard API/lib
56
57 Version LK1.04d:
58 - Merge from Donald Becker's sundance.c: (Jason Lunz)
59 * proper support for variably-sized MTUs
60 * default to PIO, to fix chip bugs
61 - Add missing unregister_netdev (Jason Lunz)
62 - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
63 - Better rx buf size calculation (Donald Becker)
64
65 Version LK1.05 (D-Link):
66 - Fix DFE-580TX packet drop issue (for DL10050C)
67 - Fix reset_tx logic
68
69 Version LK1.06 (D-Link):
70 - Fix crash while unloading driver
71
72	Version LK1.06b (D-Link):
73 - New tx scheme, adaptive tx_coalesce
74
75 Version LK1.07 (D-Link):
76 - Fix tx bugs in big-endian machines
77 - Remove unused max_interrupt_work module parameter, the new
78 NAPI-like rx scheme doesn't need it.
79	- Remove redundant get_stats() call in intr_handler(); those
80	  I/O accesses could affect performance on ARM-based systems
81 - Add Linux software VLAN support
82
83 Version LK1.08 (Philippe De Muyter phdm@macqel.be):
84 - Fix bug of custom mac address
85	  (StationAddr register only accepts word writes)
86
87 Version LK1.09 (D-Link):
88 - Fix the flowctrl bug.
89 - Set Pause bit in MII ANAR if flow control enabled.
90
91 Version LK1.09a (ICPlus):
92	- Add a delay when reading the contents of the EEPROM
93
94 Version LK1.10 (Philippe De Muyter phdm@macqel.be):
95 - Make 'unblock interface after Tx underrun' work
96
97 */
98
99 #define DRV_NAME "sundance"
100 #define DRV_VERSION "1.01+LK1.10"
101 #define DRV_RELDATE "28-Oct-2005"
102
103
104 /* The user-configurable values.
105 These may be modified when a driver module is loaded.*/
106 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
107 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
108 Typical is a 64 element hash table based on the Ethernet CRC. */
109 static int multicast_filter_limit = 32;
110
111 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
112 Setting to > 1518 effectively disables this feature.
113 This chip can receive into offset buffers, so the Alpha does not
114 need a copy-align. */
115 static int rx_copybreak;
116 static int flowctrl=1;
117
118 /* media[] specifies the media type the NIC operates at.
119 autosense Autosensing active media.
120 10mbps_hd 10Mbps half duplex.
121 10mbps_fd 10Mbps full duplex.
122 100mbps_hd 100Mbps half duplex.
123 100mbps_fd 100Mbps full duplex.
124 0 Autosensing active media.
125 1 10Mbps half duplex.
126 2 10Mbps full duplex.
127 3 100Mbps half duplex.
128 4 100Mbps full duplex.
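
   For example, when the driver is loaded as a module,
	modprobe sundance media=100mbps_fd,autosense
   would force the first card to 100Mbps full duplex and leave the second
   card autosensing (comma-separated, one entry per card).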
129 */
130 #define MAX_UNITS 8
131 static char *media[MAX_UNITS];
132
133
134 /* Operational parameters that are set at compile time. */
135
136 /* Keep the ring sizes a power of two for compile efficiency.
137 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
138 Making the Tx ring too large decreases the effectiveness of channel
139 bonding and packet priority, and more than 128 requires modifying the
140 Tx error recovery.
141 Large receive rings merely waste memory. */
142 #define TX_RING_SIZE 32
143 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
144 #define RX_RING_SIZE 64
145 #define RX_BUDGET 32
146 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
147 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
148
149 /* Operational parameters that usually are not changed. */
150 /* Time in jiffies before concluding the transmitter is hung. */
151 #define TX_TIMEOUT (4*HZ)
152 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
153
154 /* Include files, designed to support most kernel versions 2.0.0 and later. */
155 #include <linux/module.h>
156 #include <linux/kernel.h>
157 #include <linux/string.h>
158 #include <linux/timer.h>
159 #include <linux/errno.h>
160 #include <linux/ioport.h>
161 #include <linux/slab.h>
162 #include <linux/interrupt.h>
163 #include <linux/pci.h>
164 #include <linux/netdevice.h>
165 #include <linux/etherdevice.h>
166 #include <linux/skbuff.h>
167 #include <linux/init.h>
168 #include <linux/bitops.h>
169 #include <asm/uaccess.h>
170 #include <asm/processor.h> /* Processor type for cache alignment. */
171 #include <asm/io.h>
172 #include <linux/delay.h>
173 #include <linux/spinlock.h>
174 #ifndef _COMPAT_WITH_OLD_KERNEL
175 #include <linux/crc32.h>
176 #include <linux/ethtool.h>
177 #include <linux/mii.h>
178 #else
179 #include "crc32.h"
180 #include "ethtool.h"
181 #include "mii.h"
182 #include "compat.h"
183 #endif
184
185 /* These identify the driver base version and may not be removed. */
186 static char version[] __devinitdata =
187 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
188 KERN_INFO " http://www.scyld.com/network/sundance.html\n";
189
190 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
191 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
192 MODULE_LICENSE("GPL");
193
194 module_param(debug, int, 0);
195 module_param(rx_copybreak, int, 0);
196 module_param_array(media, charp, NULL, 0);
197 module_param(flowctrl, int, 0);
198 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
199 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
200 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
201
202 /*
203 Theory of Operation
204
205 I. Board Compatibility
206
207 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
208
209 II. Board-specific settings
210
211 III. Driver operation
212
213 IIIa. Ring buffers
214
215 This driver uses two statically allocated fixed-size descriptor lists
216 formed into rings by a branch from the final descriptor to the beginning of
217 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
218 Some chips explicitly use only 2^N sized rings, while others use a
219 'next descriptor' pointer that the driver forms into rings.
220
221 IIIb/c. Transmit/Receive Structure
222
223 This driver uses a zero-copy receive and transmit scheme.
224 The driver allocates full frame size skbuffs for the Rx ring buffers at
225 open() time and passes the skb->data field to the chip as receive data
226 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
227 a fresh skbuff is allocated and the frame is copied to the new skbuff.
228 When the incoming frame is larger, the skbuff is passed directly up the
229 protocol stack. Buffers consumed this way are replaced by newly allocated
230 skbuffs in a later phase of receives.
231
232 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
233 using a full-sized skbuff for small frames vs. the copying costs of larger
234 frames. New boards are typically used in generously configured machines
235 and the underfilled buffers have negligible impact compared to the benefit of
236 a single allocation size, so the default value of zero results in never
237 copying packets. When copying is done, the cost is usually mitigated by using
238 a combined copy/checksum routine. Copying also preloads the cache, which is
239 most useful with small frames.
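
In outline (a sketch only; the full logic lives in rx_poll() below):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);
		... copy the small frame, leaving the ring buffer in place ...
	} else {
		... pass the full-sized ring skbuff up the stack and
		    replace it later in refill_rx() ...
	}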
240
241 A subtle aspect of the operation is that the IP header at offset 14 in an
242 ethernet frame isn't longword aligned for further processing.
243 Unaligned buffers are permitted by the Sundance hardware, so
244 frames are received into the skbuff at an offset of "+2", 16-byte aligning
245 the IP header.
246
247 IIId. Synchronization
248
249 The driver runs as two independent, single-threaded flows of control. One
250 is the send-packet routine, which enforces single-threaded use by the
251 dev->tbusy flag. The other thread is the interrupt handler, which is single
252 threaded by the hardware and interrupt handling software.
253
254 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
255 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
256 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
257 the 'lp->tx_full' flag.
258
259 The interrupt handler has exclusive control over the Rx ring and records stats
260 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
261 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
262 clears both the tx_full and tbusy flags.
263
264 IV. Notes
265
266 IVb. References
267
268 The Sundance ST201 datasheet, preliminary version.
269 The Kendin KS8723 datasheet, preliminary version.
270 The ICplus IP100 datasheet, preliminary version.
271 http://www.scyld.com/expert/100mbps.html
272 http://www.scyld.com/expert/NWay.html
273
274 IVc. Errata
275
276 */
277
278 /* Work-around for Kendin chip bugs. */
279 #ifndef CONFIG_SUNDANCE_MMIO
280 #define USE_IO_OPS 1
281 #endif
282
283 static struct pci_device_id sundance_pci_tbl[] = {
284 {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
285 {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
286 {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
287 {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
288 {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
289 {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
290 {0,}
291 };
292 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
293
294 enum {
295 netdev_io_size = 128
296 };
297
298 struct pci_id_info {
299 const char *name;
300 };
301 static struct pci_id_info pci_id_tbl[] = {
302 {"D-Link DFE-550TX FAST Ethernet Adapter"},
303 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
304 {"D-Link DFE-580TX 4 port Server Adapter"},
305 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
306 {"D-Link DL10050-based FAST Ethernet Adapter"},
307 {"Sundance Technology Alta"},
308 {NULL,}, /* 0 terminated list. */
309 };
310
311 /* This driver was written to use PCI memory space; however, x86-oriented
312 hardware often uses I/O space accesses. */
313
314 /* Offsets to the device registers.
315 Unlike software-only systems, device drivers interact with complex hardware.
316 It's not useful to define symbolic names for every register bit in the
317 device. The name can only partially document the semantics and make
318 the driver longer and more difficult to read.
319 In general, only the important configuration values or bits changed
320 multiple times should be defined symbolically.
321 */
322 enum alta_offsets {
323 DMACtrl = 0x00,
324 TxListPtr = 0x04,
325 TxDMABurstThresh = 0x08,
326 TxDMAUrgentThresh = 0x09,
327 TxDMAPollPeriod = 0x0a,
328 RxDMAStatus = 0x0c,
329 RxListPtr = 0x10,
330 DebugCtrl0 = 0x1a,
331 DebugCtrl1 = 0x1c,
332 RxDMABurstThresh = 0x14,
333 RxDMAUrgentThresh = 0x15,
334 RxDMAPollPeriod = 0x16,
335 LEDCtrl = 0x1a,
336 ASICCtrl = 0x30,
337 EEData = 0x34,
338 EECtrl = 0x36,
339 TxStartThresh = 0x3c,
340 RxEarlyThresh = 0x3e,
341 FlashAddr = 0x40,
342 FlashData = 0x44,
343 TxStatus = 0x46,
344 TxFrameId = 0x47,
345 DownCounter = 0x18,
346 IntrClear = 0x4a,
347 IntrEnable = 0x4c,
348 IntrStatus = 0x4e,
349 MACCtrl0 = 0x50,
350 MACCtrl1 = 0x52,
351 StationAddr = 0x54,
352 MaxFrameSize = 0x5A,
353 RxMode = 0x5c,
354 MIICtrl = 0x5e,
355 MulticastFilter0 = 0x60,
356 MulticastFilter1 = 0x64,
357 RxOctetsLow = 0x68,
358 RxOctetsHigh = 0x6a,
359 TxOctetsLow = 0x6c,
360 TxOctetsHigh = 0x6e,
361 TxFramesOK = 0x70,
362 RxFramesOK = 0x72,
363 StatsCarrierError = 0x74,
364 StatsLateColl = 0x75,
365 StatsMultiColl = 0x76,
366 StatsOneColl = 0x77,
367 StatsTxDefer = 0x78,
368 RxMissed = 0x79,
369 StatsTxXSDefer = 0x7a,
370 StatsTxAbort = 0x7b,
371 StatsBcastTx = 0x7c,
372 StatsBcastRx = 0x7d,
373 StatsMcastTx = 0x7e,
374 StatsMcastRx = 0x7f,
375 /* Aliased and bogus values! */
376 RxStatus = 0x0c,
377 };
378 enum ASICCtrl_HiWord_bit {
379 GlobalReset = 0x0001,
380 RxReset = 0x0002,
381 TxReset = 0x0004,
382 DMAReset = 0x0008,
383 FIFOReset = 0x0010,
384 NetworkReset = 0x0020,
385 HostReset = 0x0040,
386 ResetBusy = 0x0400,
387 };
388
389 /* Bits in the interrupt status/mask registers. */
390 enum intr_status_bits {
391 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
392 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
393 IntrDrvRqst=0x0040,
394 StatsMax=0x0080, LinkChange=0x0100,
395 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
396 };
397
398 /* Bits in the RxMode register. */
399 enum rx_mode_bits {
400 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
401 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
402 };
403 /* Bits in MACCtrl. */
404 enum mac_ctrl0_bits {
405 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
406 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
407 };
408 enum mac_ctrl1_bits {
409 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
410 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
411 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
412 };
413
414 /* The Rx and Tx buffer descriptors. */
415 /* Note that using only 32 bit fields simplifies conversion to big-endian
416 architectures. */
417 struct netdev_desc {
418 u32 next_desc;
419 u32 status;
420 struct desc_frag { u32 addr, length; } frag[1];
421 };
422
423 /* Bits in netdev_desc.status */
424 enum desc_status_bits {
425 DescOwn=0x8000,
426 DescEndPacket=0x4000,
427 DescEndRing=0x2000,
428 LastFrag=0x80000000,
429 DescIntrOnTx=0x8000,
430 DescIntrOnDMADone=0x80000000,
431 DisableAlign = 0x00000001,
432 };
433
434 #define PRIV_ALIGN 15 /* Required alignment mask */
435 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
436 within the structure. */
437 #define MII_CNT 4
438 struct netdev_private {
439 /* Descriptor rings first for alignment. */
440 struct netdev_desc *rx_ring;
441 struct netdev_desc *tx_ring;
442 struct sk_buff* rx_skbuff[RX_RING_SIZE];
443 struct sk_buff* tx_skbuff[TX_RING_SIZE];
444 dma_addr_t tx_ring_dma;
445 dma_addr_t rx_ring_dma;
446 struct net_device_stats stats;
447 struct timer_list timer; /* Media monitoring timer. */
448 /* Frequently used values: keep some adjacent for cache effect. */
449 spinlock_t lock;
450 spinlock_t rx_lock; /* Group with Tx control cache line. */
451 int msg_enable;
452 int chip_id;
453 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
454 unsigned int rx_buf_sz; /* Based on MTU+slack. */
455 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
456 unsigned int cur_tx, dirty_tx;
457	/* These values keep track of the transceiver/media in use. */
458 unsigned int flowctrl:1;
459 unsigned int default_port:4; /* Last dev->if_port value. */
460 unsigned int an_enable:1;
461 unsigned int speed;
462 struct tasklet_struct rx_tasklet;
463 struct tasklet_struct tx_tasklet;
464 int budget;
465 int cur_task;
466 /* Multicast and receive mode. */
467 spinlock_t mcastlock; /* SMP lock multicast updates. */
468 u16 mcast_filter[4];
469 /* MII transceiver section. */
470 struct mii_if_info mii_if;
471 int mii_preamble_required;
472 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
473 struct pci_dev *pci_dev;
474 void __iomem *base;
475 unsigned char pci_rev_id;
476 };
477
478 /* The station address location in the EEPROM. */
479 #define EEPROM_SA_OFFSET 0x10
480 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
481 IntrDrvRqst | IntrTxDone | StatsMax | \
482 LinkChange)
483
484 static int change_mtu(struct net_device *dev, int new_mtu);
485 static int eeprom_read(void __iomem *ioaddr, int location);
486 static int mdio_read(struct net_device *dev, int phy_id, int location);
487 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
488 static int netdev_open(struct net_device *dev);
489 static void check_duplex(struct net_device *dev);
490 static void netdev_timer(unsigned long data);
491 static void tx_timeout(struct net_device *dev);
492 static void init_ring(struct net_device *dev);
493 static int start_tx(struct sk_buff *skb, struct net_device *dev);
494 static int reset_tx (struct net_device *dev);
495 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
496 static void rx_poll(unsigned long data);
497 static void tx_poll(unsigned long data);
498 static void refill_rx (struct net_device *dev);
499 static void netdev_error(struct net_device *dev, int intr_status);
501 static void set_rx_mode(struct net_device *dev);
502 static int __set_mac_addr(struct net_device *dev);
503 static struct net_device_stats *get_stats(struct net_device *dev);
504 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
505 static int netdev_close(struct net_device *dev);
506 static struct ethtool_ops ethtool_ops;
507
508 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
509 {
510 struct netdev_private *np = netdev_priv(dev);
511 void __iomem *ioaddr = np->base + ASICCtrl;
512 int countdown;
513
514 /* ST201 documentation states ASICCtrl is a 32bit register */
515 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
516 /* ST201 documentation states reset can take up to 1 ms */
517 countdown = 10 + 1;
518 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
519 if (--countdown == 0) {
520 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
521 break;
522 }
523 udelay(100);
524 }
525 }
526
527 static int __devinit sundance_probe1 (struct pci_dev *pdev,
528 const struct pci_device_id *ent)
529 {
530 struct net_device *dev;
531 struct netdev_private *np;
532 static int card_idx;
533 int chip_idx = ent->driver_data;
534 int irq;
535 int i;
536 void __iomem *ioaddr;
537 u16 mii_ctl;
538 void *ring_space;
539 dma_addr_t ring_dma;
540 #ifdef USE_IO_OPS
541 int bar = 0;
542 #else
543 int bar = 1;
544 #endif
545 int phy, phy_idx = 0;
546
547
548 /* when built into the kernel, we only print version if device is found */
549 #ifndef MODULE
550 static int printed_version;
551 if (!printed_version++)
552 printk(version);
553 #endif
554
555 if (pci_enable_device(pdev))
556 return -EIO;
557 pci_set_master(pdev);
558
559 irq = pdev->irq;
560
561 dev = alloc_etherdev(sizeof(*np));
562 if (!dev)
563 return -ENOMEM;
564 SET_MODULE_OWNER(dev);
565 SET_NETDEV_DEV(dev, &pdev->dev);
566
567 if (pci_request_regions(pdev, DRV_NAME))
568 goto err_out_netdev;
569
570 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
571 if (!ioaddr)
572 goto err_out_res;
573
574 for (i = 0; i < 3; i++)
575 ((u16 *)dev->dev_addr)[i] =
576 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
577 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
578
579 dev->base_addr = (unsigned long)ioaddr;
580 dev->irq = irq;
581
582 np = netdev_priv(dev);
583 np->base = ioaddr;
584 np->pci_dev = pdev;
585 np->chip_id = chip_idx;
586 np->msg_enable = (1 << debug) - 1;
587 spin_lock_init(&np->lock);
588 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
589 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
590
591 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
592 if (!ring_space)
593 goto err_out_cleardev;
594 np->tx_ring = (struct netdev_desc *)ring_space;
595 np->tx_ring_dma = ring_dma;
596
597 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
598 if (!ring_space)
599 goto err_out_unmap_tx;
600 np->rx_ring = (struct netdev_desc *)ring_space;
601 np->rx_ring_dma = ring_dma;
602
603 np->mii_if.dev = dev;
604 np->mii_if.mdio_read = mdio_read;
605 np->mii_if.mdio_write = mdio_write;
606 np->mii_if.phy_id_mask = 0x1f;
607 np->mii_if.reg_num_mask = 0x1f;
608
609 /* The chip-specific entries in the device structure. */
610 dev->open = &netdev_open;
611 dev->hard_start_xmit = &start_tx;
612 dev->stop = &netdev_close;
613 dev->get_stats = &get_stats;
614 dev->set_multicast_list = &set_rx_mode;
615 dev->do_ioctl = &netdev_ioctl;
616 SET_ETHTOOL_OPS(dev, &ethtool_ops);
617 dev->tx_timeout = &tx_timeout;
618 dev->watchdog_timeo = TX_TIMEOUT;
619 dev->change_mtu = &change_mtu;
620 pci_set_drvdata(pdev, dev);
621
622 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
623
624 i = register_netdev(dev);
625 if (i)
626 goto err_out_unmap_rx;
627
628 printk(KERN_INFO "%s: %s at %p, ",
629 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
630 for (i = 0; i < 5; i++)
631 printk("%2.2x:", dev->dev_addr[i]);
632 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
633
634 np->phys[0] = 1; /* Default setting */
635 np->mii_preamble_required++;
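	/*
	 * Assume an MDIO preamble is required while probing, and scan PHY
	 * addresses 1-31 before address 0 (32 & 0x1f == 0): the LK1.04a
	 * notes above say starting the scan at ID 1 avoids problems.
	 * BMSR bit 6 (0x0040) advertises preamble suppression, so the
	 * per-interface preamble requirement is kept only when a PHY
	 * without that capability is found.
	 */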
636 for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
637 int mii_status = mdio_read(dev, phy, MII_BMSR);
638 int phyx = phy & 0x1f;
639 if (mii_status != 0xffff && mii_status != 0x0000) {
640 np->phys[phy_idx++] = phyx;
641 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
642 if ((mii_status & 0x0040) == 0)
643 np->mii_preamble_required++;
644 printk(KERN_INFO "%s: MII PHY found at address %d, status "
645 "0x%4.4x advertising %4.4x.\n",
646 dev->name, phyx, mii_status, np->mii_if.advertising);
647 }
648 }
649 np->mii_preamble_required--;
650
651 if (phy_idx == 0) {
652 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
653 dev->name, ioread32(ioaddr + ASICCtrl));
654 goto err_out_unregister;
655 }
656
657 np->mii_if.phy_id = np->phys[0];
658
659 /* Parse override configuration */
660 np->an_enable = 1;
661 if (card_idx < MAX_UNITS) {
662 if (media[card_idx] != NULL) {
663 np->an_enable = 0;
664 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
665 strcmp (media[card_idx], "4") == 0) {
666 np->speed = 100;
667 np->mii_if.full_duplex = 1;
668 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
669 || strcmp (media[card_idx], "3") == 0) {
670 np->speed = 100;
671 np->mii_if.full_duplex = 0;
672 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
673 strcmp (media[card_idx], "2") == 0) {
674 np->speed = 10;
675 np->mii_if.full_duplex = 1;
676 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
677 strcmp (media[card_idx], "1") == 0) {
678 np->speed = 10;
679 np->mii_if.full_duplex = 0;
680 } else {
681 np->an_enable = 1;
682 }
683 }
684 if (flowctrl == 1)
685 np->flowctrl = 1;
686 }
687
688 /* Fibre PHY? */
689 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
690 /* Default 100Mbps Full */
691 if (np->an_enable) {
692 np->speed = 100;
693 np->mii_if.full_duplex = 1;
694 np->an_enable = 0;
695 }
696 }
697 /* Reset PHY */
698 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
699 mdelay (300);
700 /* If flow control enabled, we need to advertise it.*/
701 if (np->flowctrl)
702 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
703 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
704 /* Force media type */
705 if (!np->an_enable) {
706 mii_ctl = 0;
707 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
708 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
709 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
710 printk (KERN_INFO "Override speed=%d, %s duplex\n",
711 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
712
713 }
714
715 /* Perhaps move the reset here? */
716 /* Reset the chip to erase previous misconfiguration. */
717 if (netif_msg_hw(np))
718 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
719 iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
720 if (netif_msg_hw(np))
721 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
722
723 card_idx++;
724 return 0;
725
726 err_out_unregister:
727 unregister_netdev(dev);
728 err_out_unmap_rx:
729 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
730 err_out_unmap_tx:
731 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
732 err_out_cleardev:
733 pci_set_drvdata(pdev, NULL);
734 pci_iounmap(pdev, ioaddr);
735 err_out_res:
736 pci_release_regions(pdev);
737 err_out_netdev:
738 free_netdev (dev);
739 return -ENODEV;
740 }
741
742 static int change_mtu(struct net_device *dev, int new_mtu)
743 {
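	/* The Rx buffers are sized from the MTU at open time (see
	   init_ring()), so the MTU may only change while the interface
	   is down. */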
744 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
745 return -EINVAL;
746 if (netif_running(dev))
747 return -EBUSY;
748 dev->mtu = new_mtu;
749 return 0;
750 }
751
752 #define eeprom_delay(ee_addr) ioread32(ee_addr)
753 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
754 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
755 {
756 int boguscnt = 10000; /* Typical 1900 ticks. */
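	/* Issue a read command for this location, then poll until the busy
	   bit (0x8000) in EECtrl clears and return the data word. */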
757 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
758 do {
759 eeprom_delay(ioaddr + EECtrl);
760 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
761 return ioread16(ioaddr + EEData);
762 }
763 } while (--boguscnt > 0);
764 return 0;
765 }
766
767 /* MII transceiver control section.
768 Read and write the MII registers using software-generated serial
769 MDIO protocol. See the MII specifications or DP83840A data sheet
770 for details.
771
772	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
773	met by back-to-back 33 MHz PCI cycles. */
774 #define mdio_delay() ioread8(mdio_addr)
775
776 enum mii_reg_bits {
777 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
778 };
779 #define MDIO_EnbIn (0)
780 #define MDIO_WRITE0 (MDIO_EnbOutput)
781 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
782
783 /* Generate the preamble required for initial synchronization and
784 a few older transceivers. */
785 static void mdio_sync(void __iomem *mdio_addr)
786 {
787 int bits = 32;
788
789 /* Establish sync by sending at least 32 logic ones. */
790 while (--bits >= 0) {
791 iowrite8(MDIO_WRITE1, mdio_addr);
792 mdio_delay();
793 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
794 mdio_delay();
795 }
796 }
797
798 static int mdio_read(struct net_device *dev, int phy_id, int location)
799 {
800 struct netdev_private *np = netdev_priv(dev);
801 void __iomem *mdio_addr = np->base + MIICtrl;
802 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
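	/* 16-bit read command: two leading idle '1' bits, start (01) and
	   read (10) opcodes, 5-bit PHY address, 5-bit register address. */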
803 int i, retval = 0;
804
805 if (np->mii_preamble_required)
806 mdio_sync(mdio_addr);
807
808 /* Shift the read command bits out. */
809 for (i = 15; i >= 0; i--) {
810 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
811
812 iowrite8(dataval, mdio_addr);
813 mdio_delay();
814 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
815 mdio_delay();
816 }
817 /* Read the two transition, 16 data, and wire-idle bits. */
818 for (i = 19; i > 0; i--) {
819 iowrite8(MDIO_EnbIn, mdio_addr);
820 mdio_delay();
821 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
822 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
823 mdio_delay();
824 }
825 return (retval>>1) & 0xffff;
826 }
827
828 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
829 {
830 struct netdev_private *np = netdev_priv(dev);
831 void __iomem *mdio_addr = np->base + MIICtrl;
832 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
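	/* 32-bit write frame: start (01), write opcode (01), 5-bit PHY
	   address, 5-bit register address, turnaround (10), 16-bit data. */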
833 int i;
834
835 if (np->mii_preamble_required)
836 mdio_sync(mdio_addr);
837
838 /* Shift the command bits out. */
839 for (i = 31; i >= 0; i--) {
840 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
841
842 iowrite8(dataval, mdio_addr);
843 mdio_delay();
844 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
845 mdio_delay();
846 }
847 /* Clear out extra bits. */
848 for (i = 2; i > 0; i--) {
849 iowrite8(MDIO_EnbIn, mdio_addr);
850 mdio_delay();
851 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
852 mdio_delay();
853 }
854 return;
855 }
856
857 static int netdev_open(struct net_device *dev)
858 {
859 struct netdev_private *np = netdev_priv(dev);
860 void __iomem *ioaddr = np->base;
861 int i;
862
863 /* Do we need to reset the chip??? */
864
865 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
866 if (i)
867 return i;
868
869 if (netif_msg_ifup(np))
870 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
871 dev->name, dev->irq);
872 init_ring(dev);
873
874 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
875 /* The Tx list pointer is written as packets are queued. */
876
877 /* Initialize other registers. */
878 __set_mac_addr(dev);
879 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
880 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
881 #else
882 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
883 #endif
884 if (dev->mtu > 2047)
885 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
886
887 /* Configure the PCI bus bursts and FIFO thresholds. */
888
889 if (dev->if_port == 0)
890 dev->if_port = np->default_port;
891
892 spin_lock_init(&np->mcastlock);
893
894 set_rx_mode(dev);
895 iowrite16(0, ioaddr + IntrEnable);
896 iowrite16(0, ioaddr + DownCounter);
897 /* Set the chip to poll every N*320nsec. */
898 iowrite8(100, ioaddr + RxDMAPollPeriod);
899 iowrite8(127, ioaddr + TxDMAPollPeriod);
900 /* Fix DFE-580TX packet drop issue */
901 if (np->pci_rev_id >= 0x14)
902 iowrite8(0x01, ioaddr + DebugCtrl1);
903 netif_start_queue(dev);
904
905 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
906
907 if (netif_msg_ifup(np))
908 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
909 "MAC Control %x, %4.4x %4.4x.\n",
910 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
911 ioread32(ioaddr + MACCtrl0),
912 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
913
914 /* Set the timer to check for link beat. */
915 init_timer(&np->timer);
916 np->timer.expires = jiffies + 3*HZ;
917 np->timer.data = (unsigned long)dev;
918 np->timer.function = &netdev_timer; /* timer handler */
919 add_timer(&np->timer);
920
921 /* Enable interrupts by setting the interrupt mask. */
922 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
923
924 return 0;
925 }
926
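/* Set the MAC's full-duplex bit to match either the forced configuration
   or the duplex that MII autonegotiation arrived at. */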
927 static void check_duplex(struct net_device *dev)
928 {
929 struct netdev_private *np = netdev_priv(dev);
930 void __iomem *ioaddr = np->base;
931 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
932 int negotiated = mii_lpa & np->mii_if.advertising;
933 int duplex;
934
935 /* Force media */
936 if (!np->an_enable || mii_lpa == 0xffff) {
937 if (np->mii_if.full_duplex)
938 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
939 ioaddr + MACCtrl0);
940 return;
941 }
942
943 /* Autonegotiation */
944 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
945 if (np->mii_if.full_duplex != duplex) {
946 np->mii_if.full_duplex = duplex;
947 if (netif_msg_link(np))
948 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
949 "negotiated capability %4.4x.\n", dev->name,
950 duplex ? "full" : "half", np->phys[0], negotiated);
951		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
952 }
953 }
954
955 static void netdev_timer(unsigned long data)
956 {
957 struct net_device *dev = (struct net_device *)data;
958 struct netdev_private *np = netdev_priv(dev);
959 void __iomem *ioaddr = np->base;
960 int next_tick = 10*HZ;
961
962 if (netif_msg_timer(np)) {
963 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
964 "Tx %x Rx %x.\n",
965 dev->name, ioread16(ioaddr + IntrEnable),
966 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
967 }
968 check_duplex(dev);
969 np->timer.expires = jiffies + next_tick;
970 add_timer(&np->timer);
971 }
972
973 static void tx_timeout(struct net_device *dev)
974 {
975 struct netdev_private *np = netdev_priv(dev);
976 void __iomem *ioaddr = np->base;
977 unsigned long flag;
978
979 netif_stop_queue(dev);
980 tasklet_disable(&np->tx_tasklet);
981 iowrite16(0, ioaddr + IntrEnable);
982 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
983 "TxFrameId %2.2x,"
984 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
985 ioread8(ioaddr + TxFrameId));
986
987 {
988 int i;
989 for (i=0; i<TX_RING_SIZE; i++) {
990 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
991 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
992 le32_to_cpu(np->tx_ring[i].next_desc),
993 le32_to_cpu(np->tx_ring[i].status),
994 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
995 le32_to_cpu(np->tx_ring[i].frag[0].addr),
996 le32_to_cpu(np->tx_ring[i].frag[0].length));
997 }
998 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
999 ioread32(np->base + TxListPtr),
1000 netif_queue_stopped(dev));
1001 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1002 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1003 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1004 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1005 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1006 }
1007 spin_lock_irqsave(&np->lock, flag);
1008
1009 /* Stop and restart the chip's Tx processes . */
1010 reset_tx(dev);
1011 spin_unlock_irqrestore(&np->lock, flag);
1012
1013 dev->if_port = 0;
1014
1015 dev->trans_start = jiffies;
1016 np->stats.tx_errors++;
1017 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1018 netif_wake_queue(dev);
1019 }
1020 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1021 tasklet_enable(&np->tx_tasklet);
1022 }
1023
1024
1025 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1026 static void init_ring(struct net_device *dev)
1027 {
1028 struct netdev_private *np = netdev_priv(dev);
1029 int i;
1030
1031 np->cur_rx = np->cur_tx = 0;
1032 np->dirty_rx = np->dirty_tx = 0;
1033 np->cur_task = 0;
1034
1035 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1036
1037 /* Initialize all Rx descriptors. */
1038 for (i = 0; i < RX_RING_SIZE; i++) {
1039 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1040 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1041 np->rx_ring[i].status = 0;
1042 np->rx_ring[i].frag[0].length = 0;
1043 np->rx_skbuff[i] = NULL;
1044 }
1045
1046 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1047 for (i = 0; i < RX_RING_SIZE; i++) {
1048 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1049 np->rx_skbuff[i] = skb;
1050 if (skb == NULL)
1051 break;
1052 skb->dev = dev; /* Mark as being used by this device. */
1053 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1054 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1055 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1056 PCI_DMA_FROMDEVICE));
1057 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1058 }
1059 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1060
1061 for (i = 0; i < TX_RING_SIZE; i++) {
1062 np->tx_skbuff[i] = NULL;
1063 np->tx_ring[i].status = 0;
1064 }
1065 return;
1066 }
1067
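/* Tx tasklet: link newly queued descriptors into the hardware list and,
   if the Tx DMA engine has gone idle (TxListPtr == 0), restart it at the
   head of the pending chain. */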
1068 static void tx_poll (unsigned long data)
1069 {
1070 struct net_device *dev = (struct net_device *)data;
1071 struct netdev_private *np = netdev_priv(dev);
1072 unsigned head = np->cur_task % TX_RING_SIZE;
1073 struct netdev_desc *txdesc =
1074 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1075
1076 /* Chain the next pointer */
1077 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1078 int entry = np->cur_task % TX_RING_SIZE;
1079 txdesc = &np->tx_ring[entry];
1080 if (np->last_tx) {
1081 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1082 entry*sizeof(struct netdev_desc));
1083 }
1084 np->last_tx = txdesc;
1085 }
1086 /* Indicate the latest descriptor of tx ring */
1087 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1088
1089 if (ioread32 (np->base + TxListPtr) == 0)
1090 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1091 np->base + TxListPtr);
1092 return;
1093 }
1094
1095 static int
1096 start_tx (struct sk_buff *skb, struct net_device *dev)
1097 {
1098 struct netdev_private *np = netdev_priv(dev);
1099 struct netdev_desc *txdesc;
1100 unsigned entry;
1101
1102 /* Calculate the next Tx descriptor entry. */
1103 entry = np->cur_tx % TX_RING_SIZE;
1104 np->tx_skbuff[entry] = skb;
1105 txdesc = &np->tx_ring[entry];
1106
1107 txdesc->next_desc = 0;
1108 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1109 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1110 skb->len,
1111 PCI_DMA_TODEVICE));
1112 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1113
1114 /* Increment cur_tx before tasklet_schedule() */
1115 np->cur_tx++;
1116 mb();
1117 /* Schedule a tx_poll() task */
1118 tasklet_schedule(&np->tx_tasklet);
1119
1120 /* On some architectures: explicitly flush cache lines here. */
1121 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1122 && !netif_queue_stopped(dev)) {
1123 /* do nothing */
1124 } else {
1125 netif_stop_queue (dev);
1126 }
1127 dev->trans_start = jiffies;
1128 if (netif_msg_tx_queued(np)) {
1129 printk (KERN_DEBUG
1130 "%s: Transmit frame #%d queued in slot %d.\n",
1131 dev->name, np->cur_tx, entry);
1132 }
1133 return 0;
1134 }
1135
1136 /* Reset hardware tx and free all of tx buffers */
1137 static int
1138 reset_tx (struct net_device *dev)
1139 {
1140 struct netdev_private *np = netdev_priv(dev);
1141 void __iomem *ioaddr = np->base;
1142 struct sk_buff *skb;
1143 int i;
1144 int irq = in_interrupt();
1145
1146 /* Reset tx logic, TxListPtr will be cleaned */
1147 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1148 iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
1149 ioaddr + ASICCtrl + 2);
1150 for (i=50; i > 0; i--) {
1151 if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1152 break;
1153 mdelay(1);
1154 }
1155 /* free all tx skbuff */
1156 for (i = 0; i < TX_RING_SIZE; i++) {
1157 skb = np->tx_skbuff[i];
1158 if (skb) {
1159 pci_unmap_single(np->pci_dev,
1160 np->tx_ring[i].frag[0].addr, skb->len,
1161 PCI_DMA_TODEVICE);
1162 if (irq)
1163 dev_kfree_skb_irq (skb);
1164 else
1165 dev_kfree_skb (skb);
1166 np->tx_skbuff[i] = NULL;
1167 np->stats.tx_dropped++;
1168 }
1169 }
1170 np->cur_tx = np->dirty_tx = 0;
1171 np->cur_task = 0;
1172 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1173 return 0;
1174 }
1175
1176 /* The interrupt handler cleans up after the Tx thread,
1177	   and schedules Rx work via the rx_poll tasklet. */
1178 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1179 {
1180 struct net_device *dev = (struct net_device *)dev_instance;
1181 struct netdev_private *np = netdev_priv(dev);
1182 void __iomem *ioaddr = np->base;
1183 int hw_frame_id;
1184 int tx_cnt;
1185 int tx_status;
1186 int handled = 0;
1187
1188
1189 do {
1190 int intr_status = ioread16(ioaddr + IntrStatus);
1191 iowrite16(intr_status, ioaddr + IntrStatus);
1192
1193 if (netif_msg_intr(np))
1194 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1195 dev->name, intr_status);
1196
1197 if (!(intr_status & DEFAULT_INTR))
1198 break;
1199
1200 handled = 1;
1201
1202 if (intr_status & (IntrRxDMADone)) {
1203 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1204 ioaddr + IntrEnable);
1205 if (np->budget < 0)
1206 np->budget = RX_BUDGET;
1207 tasklet_schedule(&np->rx_tasklet);
1208 }
1209 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1210 tx_status = ioread16 (ioaddr + TxStatus);
1211 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1212 if (netif_msg_tx_done(np))
1213 printk
1214 ("%s: Transmit status is %2.2x.\n",
1215 dev->name, tx_status);
1216 if (tx_status & 0x1e) {
1217 if (netif_msg_tx_err(np))
1218 printk("%s: Transmit error status %4.4x.\n",
1219 dev->name, tx_status);
1220 np->stats.tx_errors++;
1221 if (tx_status & 0x10)
1222 np->stats.tx_fifo_errors++;
1223 if (tx_status & 0x08)
1224 np->stats.collisions++;
1225 if (tx_status & 0x04)
1226 np->stats.tx_fifo_errors++;
1227 if (tx_status & 0x02)
1228 np->stats.tx_window_errors++;
1229 /*
1230 ** This reset has been verified on
1231 ** DFE-580TX boards ! phdm@macqel.be.
1232 */
1233 if (tx_status & 0x10) { /* TxUnderrun */
1234 unsigned short txthreshold;
1235
1236 txthreshold = ioread16 (ioaddr + TxStartThresh);
1237 /* Restart Tx FIFO and transmitter */
1238 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1239 iowrite16 (txthreshold, ioaddr + TxStartThresh);
1240 /* No need to reset the Tx pointer here */
1241 }
1242 /* Restart the Tx. */
1243 iowrite16 (TxEnable, ioaddr + MACCtrl1);
1244 }
1245 /* Yup, this is a documentation bug. It cost me *hours*. */
1246 iowrite16 (0, ioaddr + TxStatus);
1247 if (tx_cnt < 0) {
1248 iowrite32(5000, ioaddr + DownCounter);
1249 break;
1250 }
1251 tx_status = ioread16 (ioaddr + TxStatus);
1252 }
1253 hw_frame_id = (tx_status >> 8) & 0xff;
1254 } else {
1255 hw_frame_id = ioread8(ioaddr + TxFrameId);
1256 }
1257
1258 if (np->pci_rev_id >= 0x14) {
1259 spin_lock(&np->lock);
1260 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1261 int entry = np->dirty_tx % TX_RING_SIZE;
1262 struct sk_buff *skb;
1263 int sw_frame_id;
1264 sw_frame_id = (le32_to_cpu(
1265 np->tx_ring[entry].status) >> 2) & 0xff;
1266 if (sw_frame_id == hw_frame_id &&
1267 !(le32_to_cpu(np->tx_ring[entry].status)
1268 & 0x00010000))
1269 break;
1270 if (sw_frame_id == (hw_frame_id + 1) %
1271 TX_RING_SIZE)
1272 break;
1273 skb = np->tx_skbuff[entry];
1274 /* Free the original skb. */
1275 pci_unmap_single(np->pci_dev,
1276 np->tx_ring[entry].frag[0].addr,
1277 skb->len, PCI_DMA_TODEVICE);
1278 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1279 np->tx_skbuff[entry] = NULL;
1280 np->tx_ring[entry].frag[0].addr = 0;
1281 np->tx_ring[entry].frag[0].length = 0;
1282 }
1283 spin_unlock(&np->lock);
1284 } else {
1285 spin_lock(&np->lock);
1286 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1287 int entry = np->dirty_tx % TX_RING_SIZE;
1288 struct sk_buff *skb;
1289 if (!(le32_to_cpu(np->tx_ring[entry].status)
1290 & 0x00010000))
1291 break;
1292 skb = np->tx_skbuff[entry];
1293 /* Free the original skb. */
1294 pci_unmap_single(np->pci_dev,
1295 np->tx_ring[entry].frag[0].addr,
1296 skb->len, PCI_DMA_TODEVICE);
1297 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1298 np->tx_skbuff[entry] = NULL;
1299 np->tx_ring[entry].frag[0].addr = 0;
1300 np->tx_ring[entry].frag[0].length = 0;
1301 }
1302 spin_unlock(&np->lock);
1303 }
1304
1305 if (netif_queue_stopped(dev) &&
1306 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1307 /* The ring is no longer full, clear busy flag. */
1308 netif_wake_queue (dev);
1309 }
1310 /* Abnormal error summary/uncommon events handlers. */
1311 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1312 netdev_error(dev, intr_status);
1313 } while (0);
1314 if (netif_msg_intr(np))
1315 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1316 dev->name, ioread16(ioaddr + IntrStatus));
1317 return IRQ_RETVAL(handled);
1318 }
1319
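/* Rx tasklet: process up to np->budget received frames, applying the
   rx_copybreak copy-only-tiny-frames scheme, then refill the ring.
   If the budget is exhausted the tasklet reschedules itself; otherwise
   the Rx interrupts are re-enabled. */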
1320 static void rx_poll(unsigned long data)
1321 {
1322 struct net_device *dev = (struct net_device *)data;
1323 struct netdev_private *np = netdev_priv(dev);
1324 int entry = np->cur_rx % RX_RING_SIZE;
1325 int boguscnt = np->budget;
1326 void __iomem *ioaddr = np->base;
1327 int received = 0;
1328
1329 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1330 while (1) {
1331 struct netdev_desc *desc = &(np->rx_ring[entry]);
1332 u32 frame_status = le32_to_cpu(desc->status);
1333 int pkt_len;
1334
1335 if (--boguscnt < 0) {
1336 goto not_done;
1337 }
1338 if (!(frame_status & DescOwn))
1339 break;
1340 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1341 if (netif_msg_rx_status(np))
1342 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1343 frame_status);
1344 if (frame_status & 0x001f4000) {
1345			/* There was an error. */
1346 if (netif_msg_rx_err(np))
1347 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1348 frame_status);
1349 np->stats.rx_errors++;
1350 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1351 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1352 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1353 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1354 if (frame_status & 0x00100000) {
1355 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1356 " status %8.8x.\n",
1357 dev->name, frame_status);
1358 }
1359 } else {
1360 struct sk_buff *skb;
1361 #ifndef final_version
1362 if (netif_msg_rx_status(np))
1363 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1364 ", bogus_cnt %d.\n",
1365 pkt_len, boguscnt);
1366 #endif
1367 /* Check if the packet is long enough to accept without copying
1368 to a minimally-sized skbuff. */
1369 if (pkt_len < rx_copybreak
1370 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1371 skb->dev = dev;
1372 skb_reserve(skb, 2); /* 16 byte align the IP header */
1373 pci_dma_sync_single_for_cpu(np->pci_dev,
1374 desc->frag[0].addr,
1375 np->rx_buf_sz,
1376 PCI_DMA_FROMDEVICE);
1377
1378 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1379 pci_dma_sync_single_for_device(np->pci_dev,
1380 desc->frag[0].addr,
1381 np->rx_buf_sz,
1382 PCI_DMA_FROMDEVICE);
1383 skb_put(skb, pkt_len);
1384 } else {
1385 pci_unmap_single(np->pci_dev,
1386 desc->frag[0].addr,
1387 np->rx_buf_sz,
1388 PCI_DMA_FROMDEVICE);
1389 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1390 np->rx_skbuff[entry] = NULL;
1391 }
1392 skb->protocol = eth_type_trans(skb, dev);
1393 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1394 netif_rx(skb);
1395 dev->last_rx = jiffies;
1396 }
1397 entry = (entry + 1) % RX_RING_SIZE;
1398 received++;
1399 }
1400 np->cur_rx = entry;
1401 refill_rx (dev);
1402 np->budget -= received;
1403 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1404 return;
1405
1406 not_done:
1407 np->cur_rx = entry;
1408 refill_rx (dev);
1409 if (!received)
1410 received = 1;
1411 np->budget -= received;
1412 if (np->budget <= 0)
1413 np->budget = RX_BUDGET;
1414 tasklet_schedule(&np->rx_tasklet);
1415 return;
1416 }
1417
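/* Allocate fresh skbuffs for the Rx ring entries consumed by rx_poll()
   and hand the buffers back to the hardware. */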
1418 static void refill_rx (struct net_device *dev)
1419 {
1420 struct netdev_private *np = netdev_priv(dev);
1421 int entry;
1422 int cnt = 0;
1423
1424 /* Refill the Rx ring buffers. */
1425 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1426 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1427 struct sk_buff *skb;
1428 entry = np->dirty_rx % RX_RING_SIZE;
1429 if (np->rx_skbuff[entry] == NULL) {
1430 skb = dev_alloc_skb(np->rx_buf_sz);
1431 np->rx_skbuff[entry] = skb;
1432 if (skb == NULL)
1433 break; /* Better luck next round. */
1434 skb->dev = dev; /* Mark as being used by this device. */
1435 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1436 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1437 pci_map_single(np->pci_dev, skb->data,
1438 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1439 }
1440 /* Perhaps we need not reset this field. */
1441 np->rx_ring[entry].frag[0].length =
1442 cpu_to_le32(np->rx_buf_sz | LastFrag);
1443 np->rx_ring[entry].status = 0;
1444 cnt++;
1445 }
1446 return;
1447 }
1448 static void netdev_error(struct net_device *dev, int intr_status)
1449 {
1450 struct netdev_private *np = netdev_priv(dev);
1451 void __iomem *ioaddr = np->base;
1452 u16 mii_ctl, mii_advertise, mii_lpa;
1453 int speed;
1454
1455 if (intr_status & LinkChange) {
1456 if (np->an_enable) {
1457 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1458 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1459 mii_advertise &= mii_lpa;
1460 printk (KERN_INFO "%s: Link changed: ", dev->name);
1461 if (mii_advertise & ADVERTISE_100FULL) {
1462 np->speed = 100;
1463 printk ("100Mbps, full duplex\n");
1464 } else if (mii_advertise & ADVERTISE_100HALF) {
1465 np->speed = 100;
1466 printk ("100Mbps, half duplex\n");
1467 } else if (mii_advertise & ADVERTISE_10FULL) {
1468 np->speed = 10;
1469 printk ("10Mbps, full duplex\n");
1470 } else if (mii_advertise & ADVERTISE_10HALF) {
1471 np->speed = 10;
1472 printk ("10Mbps, half duplex\n");
1473 } else
1474 printk ("\n");
1475
1476 } else {
1477 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1478 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1479 np->speed = speed;
1480 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1481 dev->name, speed);
1482 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1483 "full" : "half");
1484 }
1485 check_duplex (dev);
1486 if (np->flowctrl && np->mii_if.full_duplex) {
1487 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1488 ioaddr + MulticastFilter1+2);
1489 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1490 ioaddr + MACCtrl0);
1491 }
1492 }
1493 if (intr_status & StatsMax) {
1494 get_stats(dev);
1495 }
1496 if (intr_status & IntrPCIErr) {
1497 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1498 dev->name, intr_status);
1499 /* We must do a global reset of DMA to continue. */
1500 }
1501 }
1502
1503 static struct net_device_stats *get_stats(struct net_device *dev)
1504 {
1505 struct netdev_private *np = netdev_priv(dev);
1506 void __iomem *ioaddr = np->base;
1507 int i;
1508
1509 /* We should lock this segment of code for SMP eventually, although
1510 the vulnerability window is very small and statistics are
1511 non-critical. */
1512	/* The chip only needs to report frames it silently dropped. */
1513 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1514 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1515 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1516 np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1517 np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1518 np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1519 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
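	/* Drain the remaining 8-bit statistics registers that we do not
	   accumulate, presumably so the StatsMax condition is cleared. */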
1520 ioread8(ioaddr + StatsTxDefer);
1521 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1522 ioread8(ioaddr + i);
1523 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1524 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1525 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1526 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1527
1528 return &np->stats;
1529 }
1530
1531 static void set_rx_mode(struct net_device *dev)
1532 {
1533 struct netdev_private *np = netdev_priv(dev);
1534 void __iomem *ioaddr = np->base;
1535 u16 mc_filter[4]; /* Multicast hash filter */
1536 u32 rx_mode;
1537 int i;
1538
1539 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1540 /* Unconditionally log net taps. */
1541 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1542 memset(mc_filter, 0xff, sizeof(mc_filter));
1543 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1544 } else if ((dev->mc_count > multicast_filter_limit)
1545 || (dev->flags & IFF_ALLMULTI)) {
1546 /* Too many to match, or accept all multicasts. */
1547 memset(mc_filter, 0xff, sizeof(mc_filter));
1548 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1549 } else if (dev->mc_count) {
1550 struct dev_mc_list *mclist;
1551 int bit;
1552 int index;
1553 int crc;
1554 memset (mc_filter, 0, sizeof (mc_filter));
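		/* Hash each address with the little-endian Ethernet CRC and
		   use its top six bits (taken MSB-first) to index the 64-bit
		   multicast filter, kept as four 16-bit words. */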
1555 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1556 i++, mclist = mclist->next) {
1557 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1558 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1559 if (crc & 0x80000000) index |= 1 << bit;
1560 mc_filter[index/16] |= (1 << (index % 16));
1561 }
1562 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1563 } else {
1564 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1565 return;
1566 }
1567 if (np->mii_if.full_duplex && np->flowctrl)
1568 mc_filter[3] |= 0x0200;
1569
1570 for (i = 0; i < 4; i++)
1571 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1572 iowrite8(rx_mode, ioaddr + RxMode);
1573 }
1574
1575 static int __set_mac_addr(struct net_device *dev)
1576 {
1577 struct netdev_private *np = netdev_priv(dev);
1578 u16 addr16;
1579
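	/* The StationAddr register only accepts 16-bit writes (see the
	   LK1.08 note above), so program the MAC address a word at a time. */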
1580 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1581 iowrite16(addr16, np->base + StationAddr);
1582 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1583 iowrite16(addr16, np->base + StationAddr+2);
1584 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1585 iowrite16(addr16, np->base + StationAddr+4);
1586 return 0;
1587 }
1588
1589 static int check_if_running(struct net_device *dev)
1590 {
1591 if (!netif_running(dev))
1592 return -EINVAL;
1593 return 0;
1594 }
1595
1596 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1597 {
1598 struct netdev_private *np = netdev_priv(dev);
1599 strcpy(info->driver, DRV_NAME);
1600 strcpy(info->version, DRV_VERSION);
1601 strcpy(info->bus_info, pci_name(np->pci_dev));
1602 }
1603
1604 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1605 {
1606 struct netdev_private *np = netdev_priv(dev);
1607 spin_lock_irq(&np->lock);
1608 mii_ethtool_gset(&np->mii_if, ecmd);
1609 spin_unlock_irq(&np->lock);
1610 return 0;
1611 }
1612
1613 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1614 {
1615 struct netdev_private *np = netdev_priv(dev);
1616 int res;
1617 spin_lock_irq(&np->lock);
1618 res = mii_ethtool_sset(&np->mii_if, ecmd);
1619 spin_unlock_irq(&np->lock);
1620 return res;
1621 }
1622
1623 static int nway_reset(struct net_device *dev)
1624 {
1625 struct netdev_private *np = netdev_priv(dev);
1626 return mii_nway_restart(&np->mii_if);
1627 }
1628
1629 static u32 get_link(struct net_device *dev)
1630 {
1631 struct netdev_private *np = netdev_priv(dev);
1632 return mii_link_ok(&np->mii_if);
1633 }
1634
1635 static u32 get_msglevel(struct net_device *dev)
1636 {
1637 struct netdev_private *np = netdev_priv(dev);
1638 return np->msg_enable;
1639 }
1640
1641 static void set_msglevel(struct net_device *dev, u32 val)
1642 {
1643 struct netdev_private *np = netdev_priv(dev);
1644 np->msg_enable = val;
1645 }
1646
1647 static struct ethtool_ops ethtool_ops = {
1648 .begin = check_if_running,
1649 .get_drvinfo = get_drvinfo,
1650 .get_settings = get_settings,
1651 .set_settings = set_settings,
1652 .nway_reset = nway_reset,
1653 .get_link = get_link,
1654 .get_msglevel = get_msglevel,
1655 .set_msglevel = set_msglevel,
1656 .get_perm_addr = ethtool_op_get_perm_addr,
1657 };
1658
1659 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1660 {
1661 struct netdev_private *np = netdev_priv(dev);
1662 void __iomem *ioaddr = np->base;
1663 int rc;
1664 int i;
1665
1666 if (!netif_running(dev))
1667 return -EINVAL;
1668
1669 spin_lock_irq(&np->lock);
1670 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1671 spin_unlock_irq(&np->lock);
1672 switch (cmd) {
1673 case SIOCDEVPRIVATE:
1674 for (i=0; i<TX_RING_SIZE; i++) {
1675 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
1676 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
1677 le32_to_cpu(np->tx_ring[i].next_desc),
1678 le32_to_cpu(np->tx_ring[i].status),
1679 (le32_to_cpu(np->tx_ring[i].status) >> 2)
1680 & 0xff,
1681 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1682 le32_to_cpu(np->tx_ring[i].frag[0].length));
1683 }
1684 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1685 ioread32(np->base + TxListPtr),
1686 netif_queue_stopped(dev));
1687 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1688 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1689 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1690 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1691 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1692 printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
1693 return 0;
1694 }
1695
1696
1697 return rc;
1698 }
1699
1700 static int netdev_close(struct net_device *dev)
1701 {
1702 struct netdev_private *np = netdev_priv(dev);
1703 void __iomem *ioaddr = np->base;
1704 struct sk_buff *skb;
1705 int i;
1706
1707 netif_stop_queue(dev);
1708
1709 if (netif_msg_ifdown(np)) {
1710 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1711 "Rx %4.4x Int %2.2x.\n",
1712 dev->name, ioread8(ioaddr + TxStatus),
1713 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1714 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1715 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1716 }
1717
1718 /* Disable interrupts by clearing the interrupt mask. */
1719 iowrite16(0x0000, ioaddr + IntrEnable);
1720
1721 /* Stop the chip's Tx and Rx processes. */
1722 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1723
1724 /* Wait and kill tasklet */
1725 tasklet_kill(&np->rx_tasklet);
1726 tasklet_kill(&np->tx_tasklet);
1727
1728 #ifdef __i386__
1729 if (netif_msg_hw(np)) {
1730 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1731 (int)(np->tx_ring_dma));
1732 for (i = 0; i < TX_RING_SIZE; i++)
1733 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1734 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1735 np->tx_ring[i].frag[0].length);
1736 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1737 (int)(np->rx_ring_dma));
1738 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1739 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1740 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1741 np->rx_ring[i].frag[0].length);
1742 }
1743 }
1744 #endif /* __i386__ debugging only */
1745
1746 free_irq(dev->irq, dev);
1747
1748 del_timer_sync(&np->timer);
1749
1750 /* Free all the skbuffs in the Rx queue. */
1751 for (i = 0; i < RX_RING_SIZE; i++) {
1752 np->rx_ring[i].status = 0;
1753 np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1754 skb = np->rx_skbuff[i];
1755 if (skb) {
1756 pci_unmap_single(np->pci_dev,
1757 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1758 PCI_DMA_FROMDEVICE);
1759 dev_kfree_skb(skb);
1760 np->rx_skbuff[i] = NULL;
1761 }
1762 }
1763 for (i = 0; i < TX_RING_SIZE; i++) {
1764 skb = np->tx_skbuff[i];
1765 if (skb) {
1766 pci_unmap_single(np->pci_dev,
1767 np->tx_ring[i].frag[0].addr, skb->len,
1768 PCI_DMA_TODEVICE);
1769 dev_kfree_skb(skb);
1770 np->tx_skbuff[i] = NULL;
1771 }
1772 }
1773
1774 return 0;
1775 }
1776
1777 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1778 {
1779 struct net_device *dev = pci_get_drvdata(pdev);
1780
1781 if (dev) {
1782 struct netdev_private *np = netdev_priv(dev);
1783
1784 unregister_netdev(dev);
1785 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1786 np->rx_ring_dma);
1787 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1788 np->tx_ring_dma);
1789 pci_iounmap(pdev, np->base);
1790 pci_release_regions(pdev);
1791 free_netdev(dev);
1792 pci_set_drvdata(pdev, NULL);
1793 }
1794 }
1795
1796 static struct pci_driver sundance_driver = {
1797 .name = DRV_NAME,
1798 .id_table = sundance_pci_tbl,
1799 .probe = sundance_probe1,
1800 .remove = __devexit_p(sundance_remove1),
1801 };
1802
1803 static int __init sundance_init(void)
1804 {
1805 /* When built as a module, this is printed whether or not devices are found by probe */
1806 #ifdef MODULE
1807 printk(version);
1808 #endif
1809 return pci_module_init(&sundance_driver);
1810 }
1811
1812 static void __exit sundance_exit(void)
1813 {
1814 pci_unregister_driver(&sundance_driver);
1815 }
1816
1817 module_init(sundance_init);
1818 module_exit(sundance_exit);
1819
1820