1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2/*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
 19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
 22
23*/
24
25#define DRV_NAME "sundance"
26#define DRV_VERSION "1.2"
27#define DRV_RELDATE "11-Sep-2006"
28
29
30/* The user-configurable values.
31 These may be modified when a driver module is loaded.*/
32static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34 Typical is a 64 element hash table based on the Ethernet CRC. */
 35static const int multicast_filter_limit = 32;
36
37/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38 Setting to > 1518 effectively disables this feature.
39 This chip can receive into offset buffers, so the Alpha does not
40 need a copy-align. */
41static int rx_copybreak;
42static int flowctrl=1;
43
44/* media[] specifies the media type the NIC operates at.
45 autosense Autosensing active media.
46 10mbps_hd 10Mbps half duplex.
47 10mbps_fd 10Mbps full duplex.
48 100mbps_hd 100Mbps half duplex.
49 100mbps_fd 100Mbps full duplex.
50 0 Autosensing active media.
51 1 10Mbps half duplex.
52 2 10Mbps full duplex.
53 3 100Mbps half duplex.
54 4 100Mbps full duplex.
55*/
56#define MAX_UNITS 8
57static char *media[MAX_UNITS];
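/* For example, loading the module as
 *   modprobe sundance media=100mbps_fd,autosense debug=2 flowctrl=1
 * forces the first card to 100 Mbps full duplex and leaves the second
 * card autosensing; entries are matched to cards in probe order. */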
58
59
60/* Operational parameters that are set at compile time. */
61
62/* Keep the ring sizes a power of two for compile efficiency.
63 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64 Making the Tx ring too large decreases the effectiveness of channel
65 bonding and packet priority, and more than 128 requires modifying the
66 Tx error recovery.
67 Large receive rings merely waste memory. */
68#define TX_RING_SIZE 32
69#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70#define RX_RING_SIZE 64
71#define RX_BUDGET 32
72#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
73#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
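/* Example of the power-of-two point made above: with TX_RING_SIZE = 32,
 * an index computed as  cur_tx % TX_RING_SIZE  compiles down to
 * cur_tx & (TX_RING_SIZE - 1), i.e. a single AND instruction. */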
74
75/* Operational parameters that usually are not changed. */
76/* Time in jiffies before concluding the transmitter is hung. */
77#define TX_TIMEOUT (4*HZ)
78#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
79
80/* Include files, designed to support most kernel versions 2.0.0 and later. */
81#include <linux/module.h>
82#include <linux/kernel.h>
83#include <linux/string.h>
84#include <linux/timer.h>
85#include <linux/errno.h>
86#include <linux/ioport.h>
87#include <linux/interrupt.h>
88#include <linux/pci.h>
89#include <linux/netdevice.h>
90#include <linux/etherdevice.h>
91#include <linux/skbuff.h>
92#include <linux/init.h>
93#include <linux/bitops.h>
94#include <asm/uaccess.h>
95#include <asm/processor.h> /* Processor type for cache alignment. */
96#include <asm/io.h>
97#include <linux/delay.h>
98#include <linux/spinlock.h>
 99#include <linux/dma-mapping.h>
100#ifndef _COMPAT_WITH_OLD_KERNEL
101#include <linux/crc32.h>
102#include <linux/ethtool.h>
103#include <linux/mii.h>
104#else
105#include "crc32.h"
106#include "ethtool.h"
107#include "mii.h"
108#include "compat.h"
109#endif
110
111/* These identify the driver base version and may not be removed. */
112static const char version[] __devinitconst =
113 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
114 " Written by Donald Becker\n";
115
116MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
117MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
118MODULE_LICENSE("GPL");
119
120module_param(debug, int, 0);
121module_param(rx_copybreak, int, 0);
122module_param_array(media, charp, NULL, 0);
123module_param(flowctrl, int, 0);
124MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
125MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
126MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
127
128/*
129 Theory of Operation
130
131I. Board Compatibility
132
133This driver is designed for the Sundance Technologies "Alta" ST201 chip.
134
135II. Board-specific settings
136
137III. Driver operation
138
139IIIa. Ring buffers
140
141This driver uses two statically allocated fixed-size descriptor lists
142formed into rings by a branch from the final descriptor to the beginning of
143the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
144Some chips explicitly use only 2^N sized rings, while others use a
145'next descriptor' pointer that the driver forms into rings.
146
147IIIb/c. Transmit/Receive Structure
148
149This driver uses a zero-copy receive and transmit scheme.
150The driver allocates full frame size skbuffs for the Rx ring buffers at
151open() time and passes the skb->data field to the chip as receive data
152buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
153a fresh skbuff is allocated and the frame is copied to the new skbuff.
154When the incoming frame is larger, the skbuff is passed directly up the
155protocol stack. Buffers consumed this way are replaced by newly allocated
156skbuffs in a later phase of receives.
157
158The RX_COPYBREAK value is chosen to trade-off the memory wasted by
159using a full-sized skbuff for small frames vs. the copying costs of larger
160frames. New boards are typically used in generously configured machines
161and the underfilled buffers have negligible impact compared to the benefit of
162a single allocation size, so the default value of zero results in never
163copying packets. When copying is done, the cost is usually mitigated by using
164a combined copy/checksum routine. Copying also preloads the cache, which is
165most useful with small frames.
166
167A subtle aspect of the operation is that the IP header at offset 14 in an
168ethernet frame isn't longword aligned for further processing.
169Unaligned buffers are permitted by the Sundance hardware, so
170frames are received into the skbuff at an offset of "+2", 16-byte aligning
171the IP header.
172
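   (Worked example: an Ethernet header is 14 bytes, so receiving at an offset
   of +2 -- see skb_reserve(skb, 2) in init_ring() and rx_poll() -- places the
   IP header at offset 16, a longword-aligned boundary.)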
173IIId. Synchronization
174
175The driver runs as two independent, single-threaded flows of control. One
176is the send-packet routine, which enforces single-threaded use by the
177dev->tbusy flag. The other thread is the interrupt handler, which is single
178threaded by the hardware and interrupt handling software.
179
180The send packet thread has partial control over the Tx ring and 'dev->tbusy'
181flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
182queue slot is empty, it clears the tbusy flag when finished otherwise it sets
183the 'lp->tx_full' flag.
184
185The interrupt handler has exclusive control over the Rx ring and records stats
186from the Tx ring. After reaping the stats, it marks the Tx queue entry as
187empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
188clears both the tx_full and tbusy flags.
189
190IV. Notes
191
192IVb. References
193
194The Sundance ST201 datasheet, preliminary version.
195The Kendin KS8723 datasheet, preliminary version.
196The ICplus IP100 datasheet, preliminary version.
197http://www.scyld.com/expert/100mbps.html
198http://www.scyld.com/expert/NWay.html
199
200IVc. Errata
201
202*/
203
204/* Work-around for Kendin chip bugs. */
205#ifndef CONFIG_SUNDANCE_MMIO
206#define USE_IO_OPS 1
207#endif
208
 209static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
210 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
211 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
212 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
213 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
214 { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
215 { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
216 { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
217 { }
218};
219MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
220
221enum {
222 netdev_io_size = 128
223};
224
225struct pci_id_info {
226 const char *name;
227};
 228static const struct pci_id_info pci_id_tbl[] __devinitdata = {
229 {"D-Link DFE-550TX FAST Ethernet Adapter"},
230 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
231 {"D-Link DFE-580TX 4 port Server Adapter"},
232 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
233 {"D-Link DL10050-based FAST Ethernet Adapter"},
234 {"Sundance Technology Alta"},
 235	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
 236	{ }	/* terminate list. */
237};
238
239/* This driver was written to use PCI memory space, however x86-oriented
240 hardware often uses I/O space accesses. */
241
242/* Offsets to the device registers.
243 Unlike software-only systems, device drivers interact with complex hardware.
244 It's not useful to define symbolic names for every register bit in the
245 device. The name can only partially document the semantics and make
246 the driver longer and more difficult to read.
247 In general, only the important configuration values or bits changed
248 multiple times should be defined symbolically.
249*/
250enum alta_offsets {
251 DMACtrl = 0x00,
252 TxListPtr = 0x04,
253 TxDMABurstThresh = 0x08,
254 TxDMAUrgentThresh = 0x09,
255 TxDMAPollPeriod = 0x0a,
256 RxDMAStatus = 0x0c,
257 RxListPtr = 0x10,
258 DebugCtrl0 = 0x1a,
259 DebugCtrl1 = 0x1c,
260 RxDMABurstThresh = 0x14,
261 RxDMAUrgentThresh = 0x15,
262 RxDMAPollPeriod = 0x16,
263 LEDCtrl = 0x1a,
264 ASICCtrl = 0x30,
265 EEData = 0x34,
266 EECtrl = 0x36,
267 FlashAddr = 0x40,
268 FlashData = 0x44,
269 TxStatus = 0x46,
270 TxFrameId = 0x47,
271 DownCounter = 0x18,
272 IntrClear = 0x4a,
273 IntrEnable = 0x4c,
274 IntrStatus = 0x4e,
275 MACCtrl0 = 0x50,
276 MACCtrl1 = 0x52,
277 StationAddr = 0x54,
278 MaxFrameSize = 0x5A,
279 RxMode = 0x5c,
280 MIICtrl = 0x5e,
281 MulticastFilter0 = 0x60,
282 MulticastFilter1 = 0x64,
283 RxOctetsLow = 0x68,
284 RxOctetsHigh = 0x6a,
285 TxOctetsLow = 0x6c,
286 TxOctetsHigh = 0x6e,
287 TxFramesOK = 0x70,
288 RxFramesOK = 0x72,
289 StatsCarrierError = 0x74,
290 StatsLateColl = 0x75,
291 StatsMultiColl = 0x76,
292 StatsOneColl = 0x77,
293 StatsTxDefer = 0x78,
294 RxMissed = 0x79,
295 StatsTxXSDefer = 0x7a,
296 StatsTxAbort = 0x7b,
297 StatsBcastTx = 0x7c,
298 StatsBcastRx = 0x7d,
299 StatsMcastTx = 0x7e,
300 StatsMcastRx = 0x7f,
301 /* Aliased and bogus values! */
302 RxStatus = 0x0c,
303};
304enum ASICCtrl_HiWord_bit {
305 GlobalReset = 0x0001,
306 RxReset = 0x0002,
307 TxReset = 0x0004,
308 DMAReset = 0x0008,
309 FIFOReset = 0x0010,
310 NetworkReset = 0x0020,
311 HostReset = 0x0040,
312 ResetBusy = 0x0400,
313};
314
315/* Bits in the interrupt status/mask registers. */
316enum intr_status_bits {
317 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
318 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
319 IntrDrvRqst=0x0040,
320 StatsMax=0x0080, LinkChange=0x0100,
321 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
322};
323
324/* Bits in the RxMode register. */
325enum rx_mode_bits {
326 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
327 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
328};
329/* Bits in MACCtrl. */
330enum mac_ctrl0_bits {
331 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
332 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
333};
334enum mac_ctrl1_bits {
335 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
336 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
337 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
338};
339
340/* The Rx and Tx buffer descriptors. */
341/* Note that using only 32 bit fields simplifies conversion to big-endian
342 architectures. */
343struct netdev_desc {
344 __le32 next_desc;
345 __le32 status;
346 struct desc_frag { __le32 addr, length; } frag[1];
347};
348
349/* Bits in netdev_desc.status */
350enum desc_status_bits {
351 DescOwn=0x8000,
352 DescEndPacket=0x4000,
353 DescEndRing=0x2000,
354 LastFrag=0x80000000,
355 DescIntrOnTx=0x8000,
356 DescIntrOnDMADone=0x80000000,
357 DisableAlign = 0x00000001,
358};
359
360#define PRIV_ALIGN 15 /* Required alignment mask */
361/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
362 within the structure. */
363#define MII_CNT 4
364struct netdev_private {
365 /* Descriptor rings first for alignment. */
366 struct netdev_desc *rx_ring;
367 struct netdev_desc *tx_ring;
368 struct sk_buff* rx_skbuff[RX_RING_SIZE];
369 struct sk_buff* tx_skbuff[TX_RING_SIZE];
370 dma_addr_t tx_ring_dma;
371 dma_addr_t rx_ring_dma;
372 struct timer_list timer; /* Media monitoring timer. */
373 /* Frequently used values: keep some adjacent for cache effect. */
374 spinlock_t lock;
375 spinlock_t rx_lock; /* Group with Tx control cache line. */
376 int msg_enable;
377 int chip_id;
378 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
379 unsigned int rx_buf_sz; /* Based on MTU+slack. */
380 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
381 unsigned int cur_tx, dirty_tx;
 382	/* These values keep track of the transceiver/media in use. */
383 unsigned int flowctrl:1;
384 unsigned int default_port:4; /* Last dev->if_port value. */
385 unsigned int an_enable:1;
386 unsigned int speed;
387 struct tasklet_struct rx_tasklet;
388 struct tasklet_struct tx_tasklet;
389 int budget;
390 int cur_task;
391 /* Multicast and receive mode. */
392 spinlock_t mcastlock; /* SMP lock multicast updates. */
393 u16 mcast_filter[4];
394 /* MII transceiver section. */
395 struct mii_if_info mii_if;
396 int mii_preamble_required;
397 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
398 struct pci_dev *pci_dev;
399 void __iomem *base;
400};
401
402/* The station address location in the EEPROM. */
403#define EEPROM_SA_OFFSET 0x10
404#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
405 IntrDrvRqst | IntrTxDone | StatsMax | \
406 LinkChange)
407
408static int change_mtu(struct net_device *dev, int new_mtu);
409static int eeprom_read(void __iomem *ioaddr, int location);
410static int mdio_read(struct net_device *dev, int phy_id, int location);
411static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 412static int mdio_wait_link(struct net_device *dev, int wait);
413static int netdev_open(struct net_device *dev);
414static void check_duplex(struct net_device *dev);
415static void netdev_timer(unsigned long data);
416static void tx_timeout(struct net_device *dev);
417static void init_ring(struct net_device *dev);
 418static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
 419static int reset_tx (struct net_device *dev);
 420static irqreturn_t intr_handler(int irq, void *dev_instance);
421static void rx_poll(unsigned long data);
422static void tx_poll(unsigned long data);
423static void refill_rx (struct net_device *dev);
 424static void netdev_error(struct net_device *dev, int intr_status);
426static void set_rx_mode(struct net_device *dev);
427static int __set_mac_addr(struct net_device *dev);
428static struct net_device_stats *get_stats(struct net_device *dev);
429static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
430static int netdev_close(struct net_device *dev);
 431static const struct ethtool_ops ethtool_ops;
 432
433static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
434{
435 struct netdev_private *np = netdev_priv(dev);
436 void __iomem *ioaddr = np->base + ASICCtrl;
437 int countdown;
438
439 /* ST201 documentation states ASICCtrl is a 32bit register */
440 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
441 /* ST201 documentation states reset can take up to 1 ms */
442 countdown = 10 + 1;
443 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
444 if (--countdown == 0) {
445 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
446 break;
447 }
448 udelay(100);
449 }
450}
451
452static const struct net_device_ops netdev_ops = {
453 .ndo_open = netdev_open,
454 .ndo_stop = netdev_close,
455 .ndo_start_xmit = start_tx,
456 .ndo_get_stats = get_stats,
457 .ndo_set_multicast_list = set_rx_mode,
458 .ndo_do_ioctl = netdev_ioctl,
459 .ndo_tx_timeout = tx_timeout,
460 .ndo_change_mtu = change_mtu,
461 .ndo_set_mac_address = eth_mac_addr,
462 .ndo_validate_addr = eth_validate_addr,
463};
464
465static int __devinit sundance_probe1 (struct pci_dev *pdev,
466 const struct pci_device_id *ent)
467{
468 struct net_device *dev;
469 struct netdev_private *np;
470 static int card_idx;
471 int chip_idx = ent->driver_data;
472 int irq;
473 int i;
474 void __iomem *ioaddr;
475 u16 mii_ctl;
476 void *ring_space;
477 dma_addr_t ring_dma;
478#ifdef USE_IO_OPS
479 int bar = 0;
480#else
481 int bar = 1;
482#endif
 483	int phy, phy_end, phy_idx = 0;
484
485/* when built into the kernel, we only print version if device is found */
486#ifndef MODULE
487 static int printed_version;
488 if (!printed_version++)
489 printk(version);
490#endif
491
492 if (pci_enable_device(pdev))
493 return -EIO;
494 pci_set_master(pdev);
495
496 irq = pdev->irq;
497
498 dev = alloc_etherdev(sizeof(*np));
499 if (!dev)
500 return -ENOMEM;
501 SET_NETDEV_DEV(dev, &pdev->dev);
502
503 if (pci_request_regions(pdev, DRV_NAME))
504 goto err_out_netdev;
505
506 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
507 if (!ioaddr)
508 goto err_out_res;
509
510 for (i = 0; i < 3; i++)
511 ((__le16 *)dev->dev_addr)[i] =
512 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
 513	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
514
515 dev->base_addr = (unsigned long)ioaddr;
516 dev->irq = irq;
517
518 np = netdev_priv(dev);
519 np->base = ioaddr;
520 np->pci_dev = pdev;
521 np->chip_id = chip_idx;
522 np->msg_enable = (1 << debug) - 1;
523 spin_lock_init(&np->lock);
524 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
525 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
526
527 ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
528 &ring_dma, GFP_KERNEL);
529 if (!ring_space)
530 goto err_out_cleardev;
531 np->tx_ring = (struct netdev_desc *)ring_space;
532 np->tx_ring_dma = ring_dma;
533
534 ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
535 &ring_dma, GFP_KERNEL);
536 if (!ring_space)
537 goto err_out_unmap_tx;
538 np->rx_ring = (struct netdev_desc *)ring_space;
539 np->rx_ring_dma = ring_dma;
540
541 np->mii_if.dev = dev;
542 np->mii_if.mdio_read = mdio_read;
543 np->mii_if.mdio_write = mdio_write;
544 np->mii_if.phy_id_mask = 0x1f;
545 np->mii_if.reg_num_mask = 0x1f;
546
547 /* The chip-specific entries in the device structure. */
 548	dev->netdev_ops = &netdev_ops;
 549	SET_ETHTOOL_OPS(dev, &ethtool_ops);
 550	dev->watchdog_timeo = TX_TIMEOUT;
 551
552 pci_set_drvdata(pdev, dev);
553
554 i = register_netdev(dev);
555 if (i)
556 goto err_out_unmap_rx;
557
 558	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
 559	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
 560	       dev->dev_addr, irq);
 561
562 np->phys[0] = 1; /* Default setting */
563 np->mii_preamble_required++;
 564
 565	/*
 566	 * It seems some PHYs don't deal well with address 0 being accessed
 567	 * first
 568	 */
569 if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
570 phy = 0;
571 phy_end = 31;
572 } else {
573 phy = 1;
574 phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
575 }
576 for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
 577		int phyx = phy & 0x1f;
 578		int mii_status = mdio_read(dev, phyx, MII_BMSR);
 579		if (mii_status != 0xffff && mii_status != 0x0000) {
580 np->phys[phy_idx++] = phyx;
581 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
582 if ((mii_status & 0x0040) == 0)
583 np->mii_preamble_required++;
584 printk(KERN_INFO "%s: MII PHY found at address %d, status "
585 "0x%4.4x advertising %4.4x.\n",
 586			       dev->name, phyx, mii_status, np->mii_if.advertising);
 587		}
588 }
589 np->mii_preamble_required--;
 590
591 if (phy_idx == 0) {
592 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
593 dev->name, ioread32(ioaddr + ASICCtrl));
594 goto err_out_unregister;
595 }
596
597 np->mii_if.phy_id = np->phys[0];
598
599 /* Parse override configuration */
600 np->an_enable = 1;
601 if (card_idx < MAX_UNITS) {
602 if (media[card_idx] != NULL) {
603 np->an_enable = 0;
604 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
605 strcmp (media[card_idx], "4") == 0) {
606 np->speed = 100;
607 np->mii_if.full_duplex = 1;
608 } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
609 strcmp (media[card_idx], "3") == 0) {
610 np->speed = 100;
611 np->mii_if.full_duplex = 0;
612 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
613 strcmp (media[card_idx], "2") == 0) {
614 np->speed = 10;
615 np->mii_if.full_duplex = 1;
616 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
617 strcmp (media[card_idx], "1") == 0) {
618 np->speed = 10;
619 np->mii_if.full_duplex = 0;
620 } else {
621 np->an_enable = 1;
622 }
623 }
624 if (flowctrl == 1)
625 np->flowctrl = 1;
626 }
627
628 /* Fibre PHY? */
629 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
630 /* Default 100Mbps Full */
631 if (np->an_enable) {
632 np->speed = 100;
633 np->mii_if.full_duplex = 1;
634 np->an_enable = 0;
635 }
636 }
637 /* Reset PHY */
638 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
639 mdelay (300);
640 /* If flow control enabled, we need to advertise it.*/
641 if (np->flowctrl)
642 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
643 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
644 /* Force media type */
645 if (!np->an_enable) {
646 mii_ctl = 0;
647 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
648 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
649 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
650 printk (KERN_INFO "Override speed=%d, %s duplex\n",
651 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
652
653 }
654
655 /* Perhaps move the reset here? */
656 /* Reset the chip to erase previous misconfiguration. */
657 if (netif_msg_hw(np))
658 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
 659	sundance_reset(dev, 0x00ff << 16);
660 if (netif_msg_hw(np))
661 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
662
663 card_idx++;
664 return 0;
665
666err_out_unregister:
667 unregister_netdev(dev);
668err_out_unmap_rx:
669 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
670 np->rx_ring, np->rx_ring_dma);
 671err_out_unmap_tx:
672 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
673 np->tx_ring, np->tx_ring_dma);
674err_out_cleardev:
675 pci_set_drvdata(pdev, NULL);
676 pci_iounmap(pdev, ioaddr);
677err_out_res:
678 pci_release_regions(pdev);
679err_out_netdev:
680 free_netdev (dev);
681 return -ENODEV;
682}
683
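/* The MTU can only be changed while the interface is down; the upper
 * bound of 8191 bytes comes from the chip's RxDMAFrameLen field. */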
684static int change_mtu(struct net_device *dev, int new_mtu)
685{
686 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
687 return -EINVAL;
688 if (netif_running(dev))
689 return -EBUSY;
690 dev->mtu = new_mtu;
691 return 0;
692}
693
694#define eeprom_delay(ee_addr) ioread32(ee_addr)
695/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
696static int __devinit eeprom_read(void __iomem *ioaddr, int location)
697{
698 int boguscnt = 10000; /* Typical 1900 ticks. */
699 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
700 do {
701 eeprom_delay(ioaddr + EECtrl);
702 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
703 return ioread16(ioaddr + EEData);
704 }
705 } while (--boguscnt > 0);
706 return 0;
707}
708
709/* MII transceiver control section.
710 Read and write the MII registers using software-generated serial
711 MDIO protocol. See the MII specifications or DP83840A data sheet
712 for details.
713
 714   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
 715   met by back-to-back 33 MHz PCI cycles. */
716#define mdio_delay() ioread8(mdio_addr)
717
718enum mii_reg_bits {
719 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
720};
721#define MDIO_EnbIn (0)
722#define MDIO_WRITE0 (MDIO_EnbOutput)
723#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
724
725/* Generate the preamble required for initial synchronization and
726 a few older transceivers. */
727static void mdio_sync(void __iomem *mdio_addr)
728{
729 int bits = 32;
730
731 /* Establish sync by sending at least 32 logic ones. */
732 while (--bits >= 0) {
733 iowrite8(MDIO_WRITE1, mdio_addr);
734 mdio_delay();
735 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
736 mdio_delay();
737 }
738}
739
740static int mdio_read(struct net_device *dev, int phy_id, int location)
741{
742 struct netdev_private *np = netdev_priv(dev);
743 void __iomem *mdio_addr = np->base + MIICtrl;
744 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
745 int i, retval = 0;
746
747 if (np->mii_preamble_required)
748 mdio_sync(mdio_addr);
749
750 /* Shift the read command bits out. */
751 for (i = 15; i >= 0; i--) {
752 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
753
754 iowrite8(dataval, mdio_addr);
755 mdio_delay();
756 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
757 mdio_delay();
758 }
759 /* Read the two transition, 16 data, and wire-idle bits. */
760 for (i = 19; i > 0; i--) {
761 iowrite8(MDIO_EnbIn, mdio_addr);
762 mdio_delay();
763 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
764 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
765 mdio_delay();
766 }
767 return (retval>>1) & 0xffff;
768}
769
770static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
771{
772 struct netdev_private *np = netdev_priv(dev);
773 void __iomem *mdio_addr = np->base + MIICtrl;
774 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
775 int i;
776
777 if (np->mii_preamble_required)
778 mdio_sync(mdio_addr);
779
780 /* Shift the command bits out. */
781 for (i = 31; i >= 0; i--) {
782 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
783
784 iowrite8(dataval, mdio_addr);
785 mdio_delay();
786 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
787 mdio_delay();
788 }
789 /* Clear out extra bits. */
790 for (i = 2; i > 0; i--) {
791 iowrite8(MDIO_EnbIn, mdio_addr);
792 mdio_delay();
793 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
794 mdio_delay();
795 }
796}
797
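/* Poll the PHY's BMSR link-status bit roughly once per millisecond for up
 * to 'wait' iterations; returns 0 as soon as the link is up, -1 on timeout. */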
798static int mdio_wait_link(struct net_device *dev, int wait)
799{
800 int bmsr;
801 int phy_id;
802 struct netdev_private *np;
803
804 np = netdev_priv(dev);
805 phy_id = np->phys[0];
806
807 do {
808 bmsr = mdio_read(dev, phy_id, MII_BMSR);
809 if (bmsr & 0x0004)
810 return 0;
811 mdelay(1);
812 } while (--wait > 0);
813 return -1;
814}
815
816static int netdev_open(struct net_device *dev)
817{
818 struct netdev_private *np = netdev_priv(dev);
819 void __iomem *ioaddr = np->base;
acd70c2b 820 unsigned long flags;
821 int i;
822
823 /* Do we need to reset the chip??? */
824
 825	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
826 if (i)
827 return i;
828
829 if (netif_msg_ifup(np))
830 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
831 dev->name, dev->irq);
832 init_ring(dev);
833
834 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
835 /* The Tx list pointer is written as packets are queued. */
836
837 /* Initialize other registers. */
838 __set_mac_addr(dev);
839#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
840 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
841#else
842 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
843#endif
844 if (dev->mtu > 2047)
845 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
846
847 /* Configure the PCI bus bursts and FIFO thresholds. */
848
849 if (dev->if_port == 0)
850 dev->if_port = np->default_port;
851
852 spin_lock_init(&np->mcastlock);
853
854 set_rx_mode(dev);
855 iowrite16(0, ioaddr + IntrEnable);
856 iowrite16(0, ioaddr + DownCounter);
857 /* Set the chip to poll every N*320nsec. */
858 iowrite8(100, ioaddr + RxDMAPollPeriod);
859 iowrite8(127, ioaddr + TxDMAPollPeriod);
860 /* Fix DFE-580TX packet drop issue */
 861	if (np->pci_dev->revision >= 0x14)
862 iowrite8(0x01, ioaddr + DebugCtrl1);
863 netif_start_queue(dev);
864
865 spin_lock_irqsave(&np->lock, flags);
866 reset_tx(dev);
867 spin_unlock_irqrestore(&np->lock, flags);
868
869 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
870
871 if (netif_msg_ifup(np))
872 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
873 "MAC Control %x, %4.4x %4.4x.\n",
874 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
875 ioread32(ioaddr + MACCtrl0),
876 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
877
878 /* Set the timer to check for link beat. */
879 init_timer(&np->timer);
880 np->timer.expires = jiffies + 3*HZ;
881 np->timer.data = (unsigned long)dev;
 882	np->timer.function = netdev_timer;	/* timer handler */
883 add_timer(&np->timer);
884
885 /* Enable interrupts by setting the interrupt mask. */
886 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
887
888 return 0;
889}
890
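/* Propagate the current duplex setting to MACCtrl0: either the forced
 * value (autonegotiation disabled) or the result negotiated with the
 * link partner. */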
891static void check_duplex(struct net_device *dev)
892{
893 struct netdev_private *np = netdev_priv(dev);
894 void __iomem *ioaddr = np->base;
895 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
896 int negotiated = mii_lpa & np->mii_if.advertising;
897 int duplex;
898
899 /* Force media */
900 if (!np->an_enable || mii_lpa == 0xffff) {
901 if (np->mii_if.full_duplex)
902 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
903 ioaddr + MACCtrl0);
904 return;
905 }
906
907 /* Autonegotiation */
908 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
909 if (np->mii_if.full_duplex != duplex) {
910 np->mii_if.full_duplex = duplex;
911 if (netif_msg_link(np))
912 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
913 "negotiated capability %4.4x.\n", dev->name,
914 duplex ? "full" : "half", np->phys[0], negotiated);
 915		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
916 }
917}
918
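/* Media-monitoring timer: runs every 10 seconds, re-checks the duplex
 * setting and re-arms itself. */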
919static void netdev_timer(unsigned long data)
920{
921 struct net_device *dev = (struct net_device *)data;
922 struct netdev_private *np = netdev_priv(dev);
923 void __iomem *ioaddr = np->base;
924 int next_tick = 10*HZ;
925
926 if (netif_msg_timer(np)) {
927 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
928 "Tx %x Rx %x.\n",
929 dev->name, ioread16(ioaddr + IntrEnable),
930 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
931 }
932 check_duplex(dev);
933 np->timer.expires = jiffies + next_tick;
934 add_timer(&np->timer);
935}
936
937static void tx_timeout(struct net_device *dev)
938{
939 struct netdev_private *np = netdev_priv(dev);
940 void __iomem *ioaddr = np->base;
941 unsigned long flag;
 942
943 netif_stop_queue(dev);
944 tasklet_disable(&np->tx_tasklet);
945 iowrite16(0, ioaddr + IntrEnable);
946 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
947 "TxFrameId %2.2x,"
948 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
949 ioread8(ioaddr + TxFrameId));
950
951 {
952 int i;
953 for (i=0; i<TX_RING_SIZE; i++) {
954 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
955 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
956 le32_to_cpu(np->tx_ring[i].next_desc),
957 le32_to_cpu(np->tx_ring[i].status),
958 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
 959			le32_to_cpu(np->tx_ring[i].frag[0].addr),
960 le32_to_cpu(np->tx_ring[i].frag[0].length));
961 }
962 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
963 ioread32(np->base + TxListPtr),
 964		netif_queue_stopped(dev));
 965	printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
966 np->cur_tx, np->cur_tx % TX_RING_SIZE,
967 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
968 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
969 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
970 }
971 spin_lock_irqsave(&np->lock, flag);
972
973 /* Stop and restart the chip's Tx processes . */
974 reset_tx(dev);
975 spin_unlock_irqrestore(&np->lock, flag);
976
977 dev->if_port = 0;
978
 979	dev->trans_start = jiffies; /* prevent tx timeout */
 980	dev->stats.tx_errors++;
981 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
982 netif_wake_queue(dev);
983 }
984 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
985 tasklet_enable(&np->tx_tasklet);
986}
987
988
989/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
990static void init_ring(struct net_device *dev)
991{
992 struct netdev_private *np = netdev_priv(dev);
993 int i;
994
995 np->cur_rx = np->cur_tx = 0;
996 np->dirty_rx = np->dirty_tx = 0;
997 np->cur_task = 0;
998
999 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1000
1001 /* Initialize all Rx descriptors. */
1002 for (i = 0; i < RX_RING_SIZE; i++) {
1003 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1004 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1005 np->rx_ring[i].status = 0;
1006 np->rx_ring[i].frag[0].length = 0;
1007 np->rx_skbuff[i] = NULL;
1008 }
1009
1010 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1011 for (i = 0; i < RX_RING_SIZE; i++) {
1012 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1013 np->rx_skbuff[i] = skb;
1014 if (skb == NULL)
1015 break;
1016 skb->dev = dev; /* Mark as being used by this device. */
1017 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1018 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1019 dma_map_single(&np->pci_dev->dev, skb->data,
1020 np->rx_buf_sz, DMA_FROM_DEVICE));
1021 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1022 }
1023 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1024
1025 for (i = 0; i < TX_RING_SIZE; i++) {
1026 np->tx_skbuff[i] = NULL;
1027 np->tx_ring[i].status = 0;
1028 }
1029}
1030
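/* Tx tasklet: chain any descriptors queued since the last run into the
 * hardware list and write TxListPtr if the Tx DMA engine has gone idle. */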
1031static void tx_poll (unsigned long data)
1032{
1033 struct net_device *dev = (struct net_device *)data;
1034 struct netdev_private *np = netdev_priv(dev);
1035 unsigned head = np->cur_task % TX_RING_SIZE;
 1036	struct netdev_desc *txdesc =
 1037		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
 1038
1039 /* Chain the next pointer */
1040 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1041 int entry = np->cur_task % TX_RING_SIZE;
1042 txdesc = &np->tx_ring[entry];
1043 if (np->last_tx) {
1044 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1045 entry*sizeof(struct netdev_desc));
1046 }
1047 np->last_tx = txdesc;
1048 }
1049 /* Indicate the latest descriptor of tx ring */
1050 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1051
1052 if (ioread32 (np->base + TxListPtr) == 0)
1053 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1054 np->base + TxListPtr);
1055}
1056
 1057static netdev_tx_t
1058start_tx (struct sk_buff *skb, struct net_device *dev)
1059{
1060 struct netdev_private *np = netdev_priv(dev);
1061 struct netdev_desc *txdesc;
1062 unsigned entry;
1063
1064 /* Calculate the next Tx descriptor entry. */
1065 entry = np->cur_tx % TX_RING_SIZE;
1066 np->tx_skbuff[entry] = skb;
1067 txdesc = &np->tx_ring[entry];
1068
1069 txdesc->next_desc = 0;
1070 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1071 txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1072 skb->data, skb->len, DMA_TO_DEVICE));
1073 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1074
1075 /* Increment cur_tx before tasklet_schedule() */
1076 np->cur_tx++;
1077 mb();
1078 /* Schedule a tx_poll() task */
1079 tasklet_schedule(&np->tx_tasklet);
1080
1081 /* On some architectures: explicitly flush cache lines here. */
1082 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1083 !netif_queue_stopped(dev)) {
1084 /* do nothing */
1085 } else {
1086 netif_stop_queue (dev);
1087 }
1088 if (netif_msg_tx_queued(np)) {
1089 printk (KERN_DEBUG
1090 "%s: Transmit frame #%d queued in slot %d.\n",
1091 dev->name, np->cur_tx, entry);
1092 }
 1093	return NETDEV_TX_OK;
1094}
1095
1096/* Reset hardware tx and free all of tx buffers */
1097static int
1098reset_tx (struct net_device *dev)
1099{
1100 struct netdev_private *np = netdev_priv(dev);
1101 void __iomem *ioaddr = np->base;
1102 struct sk_buff *skb;
1103 int i;
1104 int irq = in_interrupt();
 1105
1106 /* Reset tx logic, TxListPtr will be cleaned */
1107 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1108 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1109
1110 /* free all tx skbuff */
1111 for (i = 0; i < TX_RING_SIZE; i++) {
1112 np->tx_ring[i].next_desc = 0;
1113
1114 skb = np->tx_skbuff[i];
1115 if (skb) {
 1116			dma_unmap_single(&np->pci_dev->dev,
 1117				le32_to_cpu(np->tx_ring[i].frag[0].addr),
 1118				skb->len, DMA_TO_DEVICE);
1119 if (irq)
1120 dev_kfree_skb_irq (skb);
1121 else
1122 dev_kfree_skb (skb);
1123 np->tx_skbuff[i] = NULL;
 1124			dev->stats.tx_dropped++;
1125 }
1126 }
1127 np->cur_tx = np->dirty_tx = 0;
1128 np->cur_task = 0;
 1129
 1130	np->last_tx = NULL;
1131 iowrite8(127, ioaddr + TxDMAPollPeriod);
1132
1133 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1134 return 0;
1135}
1136
 1137/* The interrupt handler cleans up after the Tx thread,
 1138   and schedules the Rx tasklet's work */
 1139static irqreturn_t intr_handler(int irq, void *dev_instance)
1140{
1141 struct net_device *dev = (struct net_device *)dev_instance;
1142 struct netdev_private *np = netdev_priv(dev);
1143 void __iomem *ioaddr = np->base;
1144 int hw_frame_id;
1145 int tx_cnt;
1146 int tx_status;
1147 int handled = 0;
 1148	int i;
1149
1150
1151 do {
1152 int intr_status = ioread16(ioaddr + IntrStatus);
1153 iowrite16(intr_status, ioaddr + IntrStatus);
1154
1155 if (netif_msg_intr(np))
1156 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1157 dev->name, intr_status);
1158
1159 if (!(intr_status & DEFAULT_INTR))
1160 break;
1161
1162 handled = 1;
1163
1164 if (intr_status & (IntrRxDMADone)) {
1165 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1166 ioaddr + IntrEnable);
1167 if (np->budget < 0)
1168 np->budget = RX_BUDGET;
1169 tasklet_schedule(&np->rx_tasklet);
1170 }
1171 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1172 tx_status = ioread16 (ioaddr + TxStatus);
1173 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1174 if (netif_msg_tx_done(np))
1175 printk
1176 ("%s: Transmit status is %2.2x.\n",
1177 dev->name, tx_status);
1178 if (tx_status & 0x1e) {
1179 if (netif_msg_tx_err(np))
1180 printk("%s: Transmit error status %4.4x.\n",
1181 dev->name, tx_status);
 1182					dev->stats.tx_errors++;
 1183					if (tx_status & 0x10)
 1184						dev->stats.tx_fifo_errors++;
 1185					if (tx_status & 0x08)
 1186						dev->stats.collisions++;
 1187					if (tx_status & 0x04)
 1188						dev->stats.tx_fifo_errors++;
 1189					if (tx_status & 0x02)
 1190						dev->stats.tx_window_errors++;
 1191
1192 /*
1193 ** This reset has been verified on
1194 ** DFE-580TX boards ! phdm@macqel.be.
1195 */
1196 if (tx_status & 0x10) { /* TxUnderrun */
1197 /* Restart Tx FIFO and transmitter */
1198 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
 1199					/* No need to reset the Tx pointer here */
 1200				}
1201 /* Restart the Tx. Need to make sure tx enabled */
1202 i = 10;
1203 do {
1204 iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1205 if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1206 break;
1207 mdelay(1);
1208 } while (--i);
1209 }
1210 /* Yup, this is a documentation bug. It cost me *hours*. */
1211 iowrite16 (0, ioaddr + TxStatus);
1212 if (tx_cnt < 0) {
1213 iowrite32(5000, ioaddr + DownCounter);
1214 break;
1215 }
1216 tx_status = ioread16 (ioaddr + TxStatus);
1217 }
1218 hw_frame_id = (tx_status >> 8) & 0xff;
1219 } else {
1220 hw_frame_id = ioread8(ioaddr + TxFrameId);
1221 }
 1222
 1223		if (np->pci_dev->revision >= 0x14) {
1224 spin_lock(&np->lock);
1225 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1226 int entry = np->dirty_tx % TX_RING_SIZE;
1227 struct sk_buff *skb;
1228 int sw_frame_id;
1229 sw_frame_id = (le32_to_cpu(
1230 np->tx_ring[entry].status) >> 2) & 0xff;
1231 if (sw_frame_id == hw_frame_id &&
1232 !(le32_to_cpu(np->tx_ring[entry].status)
1233 & 0x00010000))
1234 break;
 1235				if (sw_frame_id == (hw_frame_id + 1) %
1236 TX_RING_SIZE)
1237 break;
1238 skb = np->tx_skbuff[entry];
1239 /* Free the original skb. */
 1240				dma_unmap_single(&np->pci_dev->dev,
 1241					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
 1242					skb->len, DMA_TO_DEVICE);
1243 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1244 np->tx_skbuff[entry] = NULL;
1245 np->tx_ring[entry].frag[0].addr = 0;
1246 np->tx_ring[entry].frag[0].length = 0;
1247 }
1248 spin_unlock(&np->lock);
1249 } else {
1250 spin_lock(&np->lock);
1251 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1252 int entry = np->dirty_tx % TX_RING_SIZE;
1253 struct sk_buff *skb;
 1254				if (!(le32_to_cpu(np->tx_ring[entry].status)
1255 & 0x00010000))
1256 break;
1257 skb = np->tx_skbuff[entry];
1258 /* Free the original skb. */
 1259				dma_unmap_single(&np->pci_dev->dev,
 1260					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
 1261					skb->len, DMA_TO_DEVICE);
1262 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1263 np->tx_skbuff[entry] = NULL;
1264 np->tx_ring[entry].frag[0].addr = 0;
1265 np->tx_ring[entry].frag[0].length = 0;
1266 }
1267 spin_unlock(&np->lock);
1268 }
 1269
1270 if (netif_queue_stopped(dev) &&
1271 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1272 /* The ring is no longer full, clear busy flag. */
1273 netif_wake_queue (dev);
1274 }
1275 /* Abnormal error summary/uncommon events handlers. */
1276 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1277 netdev_error(dev, intr_status);
1278 } while (0);
1279 if (netif_msg_intr(np))
1280 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1281 dev->name, ioread16(ioaddr + IntrStatus));
1282 return IRQ_RETVAL(handled);
1283}
1284
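/* Rx tasklet: deliver up to np->budget received frames, applying the
 * rx_copybreak copy-only-tiny-frames policy, then refill the ring.  If the
 * budget runs out, the tasklet reschedules itself. */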
1285static void rx_poll(unsigned long data)
1286{
1287 struct net_device *dev = (struct net_device *)data;
1288 struct netdev_private *np = netdev_priv(dev);
1289 int entry = np->cur_rx % RX_RING_SIZE;
1290 int boguscnt = np->budget;
1291 void __iomem *ioaddr = np->base;
1292 int received = 0;
1293
1294 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1295 while (1) {
1296 struct netdev_desc *desc = &(np->rx_ring[entry]);
1297 u32 frame_status = le32_to_cpu(desc->status);
1298 int pkt_len;
1299
1300 if (--boguscnt < 0) {
1301 goto not_done;
1302 }
1303 if (!(frame_status & DescOwn))
1304 break;
1305 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1306 if (netif_msg_rx_status(np))
1307 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1308 frame_status);
1309 if (frame_status & 0x001f4000) {
 1310			/* There was an error. */
1311 if (netif_msg_rx_err(np))
1312 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1313 frame_status);
1314 dev->stats.rx_errors++;
1315 if (frame_status & 0x00100000)
1316 dev->stats.rx_length_errors++;
1317 if (frame_status & 0x00010000)
1318 dev->stats.rx_fifo_errors++;
1319 if (frame_status & 0x00060000)
1320 dev->stats.rx_frame_errors++;
1321 if (frame_status & 0x00080000)
1322 dev->stats.rx_crc_errors++;
1323 if (frame_status & 0x00100000) {
1324 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1325 " status %8.8x.\n",
1326 dev->name, frame_status);
1327 }
1328 } else {
1329 struct sk_buff *skb;
1330#ifndef final_version
1331 if (netif_msg_rx_status(np))
1332 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1333 ", bogus_cnt %d.\n",
1334 pkt_len, boguscnt);
1335#endif
1336 /* Check if the packet is long enough to accept without copying
1337 to a minimally-sized skbuff. */
1338 if (pkt_len < rx_copybreak &&
1339 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
 1340				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1341 dma_sync_single_for_cpu(&np->pci_dev->dev,
1342 le32_to_cpu(desc->frag[0].addr),
1343 np->rx_buf_sz, DMA_FROM_DEVICE);
 1344				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1345 dma_sync_single_for_device(&np->pci_dev->dev,
1346 le32_to_cpu(desc->frag[0].addr),
1347 np->rx_buf_sz, DMA_FROM_DEVICE);
1348 skb_put(skb, pkt_len);
1349 } else {
 1350				dma_unmap_single(&np->pci_dev->dev,
 1351					le32_to_cpu(desc->frag[0].addr),
 1352					np->rx_buf_sz, DMA_FROM_DEVICE);
1353 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1354 np->rx_skbuff[entry] = NULL;
1355 }
1356 skb->protocol = eth_type_trans(skb, dev);
1357 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1358 netif_rx(skb);
1359 }
1360 entry = (entry + 1) % RX_RING_SIZE;
1361 received++;
1362 }
1363 np->cur_rx = entry;
1364 refill_rx (dev);
1365 np->budget -= received;
1366 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1367 return;
1368
1369not_done:
1370 np->cur_rx = entry;
1371 refill_rx (dev);
1372 if (!received)
1373 received = 1;
1374 np->budget -= received;
1375 if (np->budget <= 0)
1376 np->budget = RX_BUDGET;
1377 tasklet_schedule(&np->rx_tasklet);
1378}
1379
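/* Allocate and DMA-map fresh skbuffs for the Rx ring slots consumed by
 * rx_poll(). */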
1380static void refill_rx (struct net_device *dev)
1381{
1382 struct netdev_private *np = netdev_priv(dev);
1383 int entry;
1384 int cnt = 0;
1385
1386 /* Refill the Rx ring buffers. */
1387 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1388 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1389 struct sk_buff *skb;
1390 entry = np->dirty_rx % RX_RING_SIZE;
1391 if (np->rx_skbuff[entry] == NULL) {
1392 skb = dev_alloc_skb(np->rx_buf_sz);
1393 np->rx_skbuff[entry] = skb;
1394 if (skb == NULL)
1395 break; /* Better luck next round. */
1396 skb->dev = dev; /* Mark as being used by this device. */
1397 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1398 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1399 dma_map_single(&np->pci_dev->dev, skb->data,
1400 np->rx_buf_sz, DMA_FROM_DEVICE));
1401 }
1402 /* Perhaps we need not reset this field. */
1403 np->rx_ring[entry].frag[0].length =
1404 cpu_to_le32(np->rx_buf_sz | LastFrag);
1405 np->rx_ring[entry].status = 0;
1406 cnt++;
1407 }
1408}
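/* Handle the infrequent interrupt sources: link changes, statistics
 * counters nearing overflow, and PCI bus errors. */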
1409static void netdev_error(struct net_device *dev, int intr_status)
1410{
1411 struct netdev_private *np = netdev_priv(dev);
1412 void __iomem *ioaddr = np->base;
1413 u16 mii_ctl, mii_advertise, mii_lpa;
1414 int speed;
1415
1416 if (intr_status & LinkChange) {
1417 if (mdio_wait_link(dev, 10) == 0) {
1418 printk(KERN_INFO "%s: Link up\n", dev->name);
1419 if (np->an_enable) {
1420 mii_advertise = mdio_read(dev, np->phys[0],
1421 MII_ADVERTISE);
1422 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1423 mii_advertise &= mii_lpa;
1424 printk(KERN_INFO "%s: Link changed: ",
1425 dev->name);
1426 if (mii_advertise & ADVERTISE_100FULL) {
1427 np->speed = 100;
1428 printk("100Mbps, full duplex\n");
1429 } else if (mii_advertise & ADVERTISE_100HALF) {
1430 np->speed = 100;
1431 printk("100Mbps, half duplex\n");
1432 } else if (mii_advertise & ADVERTISE_10FULL) {
1433 np->speed = 10;
1434 printk("10Mbps, full duplex\n");
1435 } else if (mii_advertise & ADVERTISE_10HALF) {
1436 np->speed = 10;
1437 printk("10Mbps, half duplex\n");
1438 } else
1439 printk("\n");
 1440
1441 } else {
1442 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1443 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1444 np->speed = speed;
1445 printk(KERN_INFO "%s: Link changed: %dMbps ,",
1446 dev->name, speed);
1447 printk("%s duplex.\n",
1448 (mii_ctl & BMCR_FULLDPLX) ?
1449 "full" : "half");
1450 }
1451 check_duplex(dev);
1452 if (np->flowctrl && np->mii_if.full_duplex) {
1453 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1454 ioaddr + MulticastFilter1+2);
1455 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1456 ioaddr + MACCtrl0);
1457 }
1458 netif_carrier_on(dev);
 1459		} else {
1460 printk(KERN_INFO "%s: Link down\n", dev->name);
1461 netif_carrier_off(dev);
1462 }
1463 }
1464 if (intr_status & StatsMax) {
1465 get_stats(dev);
1466 }
1467 if (intr_status & IntrPCIErr) {
1468 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1469 dev->name, intr_status);
1470 /* We must do a global reset of DMA to continue. */
1471 }
1472}
1473
1474static struct net_device_stats *get_stats(struct net_device *dev)
1475{
1476 struct netdev_private *np = netdev_priv(dev);
1477 void __iomem *ioaddr = np->base;
1478 int i;
1479
1480 /* We should lock this segment of code for SMP eventually, although
1481 the vulnerability window is very small and statistics are
1482 non-critical. */
 1483	/* The chip only needs to report frames it silently dropped. */
1484 dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1485 dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1486 dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1487 dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
1488 dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1489 dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
1490 dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1491 ioread8(ioaddr + StatsTxDefer);
1492 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1493 ioread8(ioaddr + i);
1494 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1495 dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1496 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1497 dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
 1498
 1499	return &dev->stats;
1500}
1501
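/* Program RxMode and the 64-bit multicast hash filter from dev->flags and
 * the interface's multicast list. */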
1502static void set_rx_mode(struct net_device *dev)
1503{
1504 struct netdev_private *np = netdev_priv(dev);
1505 void __iomem *ioaddr = np->base;
1506 u16 mc_filter[4]; /* Multicast hash filter */
1507 u32 rx_mode;
1508 int i;
1509
1510 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1511 memset(mc_filter, 0xff, sizeof(mc_filter));
1512 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
 1513	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
 1514		   (dev->flags & IFF_ALLMULTI)) {
1515 /* Too many to match, or accept all multicasts. */
1516 memset(mc_filter, 0xff, sizeof(mc_filter));
1517 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 1518	} else if (!netdev_mc_empty(dev)) {
 1519		struct netdev_hw_addr *ha;
1520 int bit;
1521 int index;
1522 int crc;
1523 memset (mc_filter, 0, sizeof (mc_filter));
1524 netdev_for_each_mc_addr(ha, dev) {
1525 crc = ether_crc_le(ETH_ALEN, ha->addr);
1526 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1527 if (crc & 0x80000000) index |= 1 << bit;
1528 mc_filter[index/16] |= (1 << (index % 16));
1529 }
1530 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1531 } else {
1532 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1533 return;
1534 }
1535 if (np->mii_if.full_duplex && np->flowctrl)
1536 mc_filter[3] |= 0x0200;
1537
1538 for (i = 0; i < 4; i++)
1539 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1540 iowrite8(rx_mode, ioaddr + RxMode);
1541}
1542
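/* Copy dev->dev_addr into the chip's StationAddr registers, 16 bits at a
 * time. */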
1543static int __set_mac_addr(struct net_device *dev)
1544{
1545 struct netdev_private *np = netdev_priv(dev);
1546 u16 addr16;
1547
1548 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1549 iowrite16(addr16, np->base + StationAddr);
1550 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1551 iowrite16(addr16, np->base + StationAddr+2);
1552 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1553 iowrite16(addr16, np->base + StationAddr+4);
1554 return 0;
1555}
1556
1557static int check_if_running(struct net_device *dev)
1558{
1559 if (!netif_running(dev))
1560 return -EINVAL;
1561 return 0;
1562}
1563
1564static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1565{
1566 struct netdev_private *np = netdev_priv(dev);
1567 strcpy(info->driver, DRV_NAME);
1568 strcpy(info->version, DRV_VERSION);
1569 strcpy(info->bus_info, pci_name(np->pci_dev));
1570}
1571
1572static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1573{
1574 struct netdev_private *np = netdev_priv(dev);
1575 spin_lock_irq(&np->lock);
1576 mii_ethtool_gset(&np->mii_if, ecmd);
1577 spin_unlock_irq(&np->lock);
1578 return 0;
1579}
1580
1581static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1582{
1583 struct netdev_private *np = netdev_priv(dev);
1584 int res;
1585 spin_lock_irq(&np->lock);
1586 res = mii_ethtool_sset(&np->mii_if, ecmd);
1587 spin_unlock_irq(&np->lock);
1588 return res;
1589}
1590
1591static int nway_reset(struct net_device *dev)
1592{
1593 struct netdev_private *np = netdev_priv(dev);
1594 return mii_nway_restart(&np->mii_if);
1595}
1596
1597static u32 get_link(struct net_device *dev)
1598{
1599 struct netdev_private *np = netdev_priv(dev);
1600 return mii_link_ok(&np->mii_if);
1601}
1602
1603static u32 get_msglevel(struct net_device *dev)
1604{
1605 struct netdev_private *np = netdev_priv(dev);
1606 return np->msg_enable;
1607}
1608
1609static void set_msglevel(struct net_device *dev, u32 val)
1610{
1611 struct netdev_private *np = netdev_priv(dev);
1612 np->msg_enable = val;
1613}
1614
 1615static const struct ethtool_ops ethtool_ops = {
1616 .begin = check_if_running,
1617 .get_drvinfo = get_drvinfo,
1618 .get_settings = get_settings,
1619 .set_settings = set_settings,
1620 .nway_reset = nway_reset,
1621 .get_link = get_link,
1622 .get_msglevel = get_msglevel,
1623 .set_msglevel = set_msglevel,
1624};
1625
1626static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1627{
1628 struct netdev_private *np = netdev_priv(dev);
 1629	int rc;
1630
1631 if (!netif_running(dev))
1632 return -EINVAL;
1633
1634 spin_lock_irq(&np->lock);
1635 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1636 spin_unlock_irq(&np->lock);
1637
1638 return rc;
1639}
1640
1641static int netdev_close(struct net_device *dev)
1642{
1643 struct netdev_private *np = netdev_priv(dev);
1644 void __iomem *ioaddr = np->base;
1645 struct sk_buff *skb;
1646 int i;
1647
1648 /* Wait and kill tasklet */
1649 tasklet_kill(&np->rx_tasklet);
1650 tasklet_kill(&np->tx_tasklet);
1651 np->cur_tx = 0;
1652 np->dirty_tx = 0;
1653 np->cur_task = 0;
 1654	np->last_tx = NULL;
 1655
1656 netif_stop_queue(dev);
1657
1658 if (netif_msg_ifdown(np)) {
1659 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1660 "Rx %4.4x Int %2.2x.\n",
1661 dev->name, ioread8(ioaddr + TxStatus),
1662 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1663 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1664 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1665 }
1666
1667 /* Disable interrupts by clearing the interrupt mask. */
1668 iowrite16(0x0000, ioaddr + IntrEnable);
1669
 1670	/* Disable Rx and Tx DMA so resources can be released safely */
1671 iowrite32(0x500, ioaddr + DMACtrl);
1672
1673 /* Stop the chip's Tx and Rx processes. */
1674 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1675
1676 for (i = 2000; i > 0; i--) {
1677 if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1678 break;
1679 mdelay(1);
1680 }
1681
1682 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1683 ioaddr +ASICCtrl + 2);
1684
1685 for (i = 2000; i > 0; i--) {
1686 if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
1687 break;
1688 mdelay(1);
1689 }
1690
1691#ifdef __i386__
1692 if (netif_msg_hw(np)) {
 1693		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1694 (int)(np->tx_ring_dma));
1695 for (i = 0; i < TX_RING_SIZE; i++)
 1696			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1697 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1698 np->tx_ring[i].frag[0].length);
 1699		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1700 (int)(np->rx_ring_dma));
1701 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1702 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1703 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1704 np->rx_ring[i].frag[0].length);
1705 }
1706 }
1707#endif /* __i386__ debugging only */
1708
1709 free_irq(dev->irq, dev);
1710
1711 del_timer_sync(&np->timer);
1712
1713 /* Free all the skbuffs in the Rx queue. */
1714 for (i = 0; i < RX_RING_SIZE; i++) {
1715 np->rx_ring[i].status = 0;
1716 skb = np->rx_skbuff[i];
1717 if (skb) {
 1718			dma_unmap_single(&np->pci_dev->dev,
 1719				le32_to_cpu(np->rx_ring[i].frag[0].addr),
 1720				np->rx_buf_sz, DMA_FROM_DEVICE);
1721 dev_kfree_skb(skb);
1722 np->rx_skbuff[i] = NULL;
1723 }
 1724		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1725 }
1726 for (i = 0; i < TX_RING_SIZE; i++) {
 1727		np->tx_ring[i].next_desc = 0;
1728 skb = np->tx_skbuff[i];
1729 if (skb) {
 1730			dma_unmap_single(&np->pci_dev->dev,
 1731				le32_to_cpu(np->tx_ring[i].frag[0].addr),
 1732				skb->len, DMA_TO_DEVICE);
1733 dev_kfree_skb(skb);
1734 np->tx_skbuff[i] = NULL;
1735 }
1736 }
1737
1738 return 0;
1739}
1740
1741static void __devexit sundance_remove1 (struct pci_dev *pdev)
1742{
1743 struct net_device *dev = pci_get_drvdata(pdev);
1744
1745 if (dev) {
1746 struct netdev_private *np = netdev_priv(dev);
1747 unregister_netdev(dev);
1748 dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1749 np->rx_ring, np->rx_ring_dma);
1750 dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1751 np->tx_ring, np->tx_ring_dma);
1752 pci_iounmap(pdev, np->base);
1753 pci_release_regions(pdev);
1754 free_netdev(dev);
1755 pci_set_drvdata(pdev, NULL);
1756 }
1757}
1758
1759#ifdef CONFIG_PM
1760
1761static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1762{
1763 struct net_device *dev = pci_get_drvdata(pci_dev);
1764
1765 if (!netif_running(dev))
1766 return 0;
1767
1768 netdev_close(dev);
1769 netif_device_detach(dev);
1770
1771 pci_save_state(pci_dev);
1772 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1773
1774 return 0;
1775}
1776
1777static int sundance_resume(struct pci_dev *pci_dev)
1778{
1779 struct net_device *dev = pci_get_drvdata(pci_dev);
1780 int err = 0;
1781
1782 if (!netif_running(dev))
1783 return 0;
1784
1785 pci_set_power_state(pci_dev, PCI_D0);
1786 pci_restore_state(pci_dev);
1787
1788 err = netdev_open(dev);
1789 if (err) {
1790 printk(KERN_ERR "%s: Can't resume interface!\n",
1791 dev->name);
1792 goto out;
1793 }
1794
1795 netif_device_attach(dev);
1796
1797out:
1798 return err;
1799}
1800
1801#endif /* CONFIG_PM */
1802
1803static struct pci_driver sundance_driver = {
1804 .name = DRV_NAME,
1805 .id_table = sundance_pci_tbl,
1806 .probe = sundance_probe1,
1807 .remove = __devexit_p(sundance_remove1),
1808#ifdef CONFIG_PM
1809 .suspend = sundance_suspend,
1810 .resume = sundance_resume,
1811#endif /* CONFIG_PM */
1812};
1813
1814static int __init sundance_init(void)
1815{
1816/* when a module, this is printed whether or not devices are found in probe */
1817#ifdef MODULE
1818 printk(version);
1819#endif
 1820	return pci_register_driver(&sundance_driver);
1821}
1822
1823static void __exit sundance_exit(void)
1824{
1825 pci_unregister_driver(&sundance_driver);
1826}
1827
1828module_init(sundance_init);
1829module_exit(sundance_exit);
1830
1831